import numpy as np

# A single linear neuron trained with the delta (Widrow-Hoff) rule.
class SimpleNeuron:
    """A two-input neuron with identity activation, fit by per-sample gradient steps."""

    def __init__(self):
        # Parameters MUST be NumPy float arrays. The previous code used plain
        # Python lists ([0, 0] and [0]); `list += ndarray` *extends* the list
        # with the update values instead of adding element-wise, which corrupts
        # the parameters and makes the next np.dot call raise ValueError.
        self.weights = np.zeros(2)  # one weight per input component
        self.bias = np.zeros(1)     # scalar bias kept as a length-1 array
        print(f"self.weights: {self.weights}, self.bias: {self.bias}")

    def activate(self, x):
        # Linear (identity) activation: the neuron models a plain affine map.
        return x

    def feedforward(self, inputs):
        """Return the neuron's output for a 2-element input sequence.

        np.dot gives a scalar; adding the length-1 bias broadcasts the
        result to a length-1 ndarray.
        """
        linear_output = np.dot(inputs, self.weights) + self.bias
        activation_output = self.activate(linear_output)
        return activation_output

    def train(self, training_data, epochs, learning_rate):
        """Fit the neuron with per-sample (stochastic) delta-rule updates.

        training_data: iterable of ((input1, input2), target) pairs.
        epochs: number of full passes over the data.
        learning_rate: step size applied to each update.
        """
        print(f"Updated Weights: w1 = {self.weights[0]}, w2 = {self.weights[1]}, Updated Bias: {self.bias[0]}")
        for epoch in range(epochs):
            for inputs, target in training_data:
                # Forward pass.
                output = self.feedforward(inputs)

                # Prediction error (length-1 ndarray; broadcasts over inputs).
                error = target - output

                # Gradient-descent step on the squared error.
                delta_weights = learning_rate * error * inputs
                delta_bias = learning_rate * error

                # Element-wise in-place update (safe now that these are arrays).
                self.weights += delta_weights
                self.bias += delta_bias

                print(f"Inputs: {inputs}, Target: {target}, Output: {output},  Error: {error}, delta_weights: {delta_weights}, delta_bias: {delta_bias}")
                print(f"Epoch {epoch + 1} Updated Weights: w1 = {self.weights[0]:.4f}, w2 = {self.weights[1]:.4f}, Updated Bias: {self.bias[0]:.4f} \n")

# Instantiate the neuron.
net = SimpleNeuron()

# Training data encoding output = x1 + x2 (target parameters: w1 = 1, w2 = 1, bias = 0).
sum_dataset = [
    ((0, 0), 0),
    ((0, 1), 1),
    ((1, 0), 1),
    ((1, 1), 2)
]

# Train the neuron; a modest epoch count keeps the log short.
net.train(sum_dataset, epochs=20, learning_rate=0.5)

# Evaluate twice: first on the training inputs, then on unseen pairs,
# printing the learned parameters after each pass.
evaluation_batches = [
    [(0, 0), (0, 1), (1, 0), (1, 1)],
    [(2, 1), (3, 4), (7, 8), (4, 5)],
]
for batch in evaluation_batches:
    for inputs in batch:
        output = net.feedforward(inputs)
        print(f"Input: {inputs}, Output: {output}, Expected: {np.sum(inputs)}")
    print(f"Final Weights: w1 = {net.weights[0]:.4f}, w2 = {net.weights[1]:.4f}, bias = {net.bias[0]:.4f}")