import numpy as np

# Neural network model
class NeuralNetwork:
    """A single linear neuron (y = x @ w + b) trained with batch gradient descent.

    Built for learning to add two numbers: with two input features the exact
    solution is w = [1, 1], b = 0 — which is also the initial state, so on
    sum-labelled data the model starts at zero loss and training is a no-op.
    From any other starting point, ``train`` now actually descends the MSE.
    """

    def __init__(self):
        # Two input features (the two addends), one output (their sum).
        # Initialising w = [1, 1], b = 0 places the model at the exact
        # solution for addition from the start.
        self.weights = np.ones((2, 1))
        self.bias = 0.0

    def forward_propagation(self, x):
        """Return the linear prediction x @ w + b.

        x: array of shape (n_samples, 2). Returns shape (n_samples, 1).
        """
        return np.dot(x, self.weights) + self.bias

    def train(self, X, y, epochs, learning_rate):
        """Fit weights and bias by full-batch gradient descent on MSE.

        X: (n_samples, 2) inputs; y: (n_samples, 1) targets.
        Prints the loss once per epoch.

        Fix vs. the original: the original ignored ``learning_rate`` and
        never updated the parameters; this version performs the descent step.
        """
        n_samples = X.shape[0]
        for epoch in range(epochs):
            # Forward pass and residual.
            output = self.forward_propagation(X)
            error = y - output  # shape (n_samples, 1)
            # Mean squared error over the batch.
            loss = np.mean(np.square(error))
            # d(loss)/dw = -(2/n) * X.T @ error, so the descent step ADDS
            # the error-weighted inputs; likewise for the bias.
            self.weights += learning_rate * (2.0 / n_samples) * np.dot(X.T, error)
            self.bias += learning_rate * (2.0 / n_samples) * np.sum(error)
            print(f"Epoch {epoch + 1}/{epochs}, Loss: {loss}")

# --- Demo: train the adder network on random integer pairs, then spot-check it ---

# Instantiate the model.
model = NeuralNetwork()

# Synthetic dataset: 1000 random integer pairs in [0, 100),
# each labelled with the pair's sum.
sample_count = 1000
features = np.random.randint(0, 100, size=(sample_count, 2))
labels = features[:, 0] + features[:, 1]

# Run the training loop: 100 epochs at a learning rate of 0.01.
model.train(features, labels.reshape(sample_count, 1), 100, 0.01)

# Sanity-check the model on two hand-picked input pairs.
probe = np.array([[1, 2], [3, 4]])
predictions = model.forward_propagation(probe)

print("Predicted sums:")
print(predictions)