import numpy as  np

def sigmoid(x):
    """Logistic activation function: f(x) = 1 / (1 + e^(-x)).

    Works elementwise on scalars or numpy arrays and maps any real
    input into the open interval (0, 1).
    """
    # np.exp(-x) computes e raised to the power -x, elementwise.
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)

class Neuron:
    """A single artificial neuron with fixed weights and a bias.

    Attributes:
        weights: 1-D array-like of input weights (w1, w2, ...).
        bias: scalar offset added to the weighted sum.
    """

    def __init__(self, weights, bias):
        self.weights = weights
        self.bias = bias

    def feedforward(self, inputs):
        """Return the neuron's activation for the given inputs.

        Feedforward describes the computation flowing from inputs
        through the neuron to its output: the dot product of weights
        and inputs, plus the bias, squashed through the sigmoid.
        """
        # e.g. weights=[0, 1], inputs=[2, 3], bias=4:
        # 0*2 + 1*3 + 4 = 7, then sigmoid(7) is returned.
        total = np.dot(self.weights, inputs) + self.bias
        return sigmoid(total)

# Demo guarded so importing this module does not run the example:
# dot([0, 1], [2, 3]) + 4 = 3 + 4 = 7, sigmoid(7) ~= 0.9990889
if __name__ == "__main__":
    weights = np.array([0, 1])  # w1 = 0, w2 = 1
    bias = 4
    n = Neuron(weights, bias)

    x = np.array([2, 3])
    print(n.feedforward(x))

