It's just a dot product plus a bias.
# 1. Calculate Weighted Sum
linear_output = dot(inputs, weights) + bias
# 2. Apply Activation
final_output = sigmoid(linear_output)

A raw Python implementation without libraries:
import math

class Neuron:
    def __init__(self, size):
        # One weight per input, plus a single bias term
        self.weights = [0.0] * size
        self.bias = 0.0

    def forward(self, inputs):
        # 1. Weighted sum of inputs and weights (the dot product)
        total = sum(i * w for i, w in zip(inputs, self.weights))
        # 2. Add the bias
        total += self.bias
        # 3. Sigmoid activation squashes the sum into (0, 1)
        return 1.0 / (1.0 + math.exp(-total))
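
As a quick sanity check, here is a usage sketch; the weights, bias, and inputs are made-up values for illustration only:

# Made-up values, purely for illustration
neuron = Neuron(size=3)
neuron.weights = [0.5, -0.25, 0.1]
neuron.bias = 0.2

# Weighted sum: 1.0*0.5 + 2.0*(-0.25) + 3.0*0.1 + 0.2 = 0.5
# sigmoid(0.5) ≈ 0.622
print(neuron.forward([1.0, 2.0, 3.0]))  # -> 0.6224593312018546

Note that a freshly constructed neuron, with all-zero weights and bias, outputs sigmoid(0) = 0.5 for any input, which is a handy way to verify the wiring before adding real weights.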