import numpy as np

# Activation functions and their derivatives
def relu(x):
    """Rectified linear unit: element-wise max(x, 0)."""
    return np.where(x > 0, x, 0)

def relu_derivative(x):
    """Derivative of ReLU: 1.0 where x > 0, else 0.0 (0 at x == 0 by convention)."""
    return np.where(x > 0, 1.0, 0.0)

def identity_function(x):
    """Linear (identity) activation used on the output layer: returns x unchanged."""
    return x

def identity_derivative(x):
    """Derivative of the identity activation: constant 1 (broadcasts over arrays)."""
    return 1

# 神经网络模型
# Neural network model
class SimpleNeuralNetwork:
    """A two-layer MLP (ReLU hidden layer, linear output) trained with
    mini-batch gradient descent on a mean-squared-error loss.

    Depends on the module-level activation helpers ``relu``,
    ``relu_derivative``, ``identity_function`` and ``identity_derivative``.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes):
        """Create the network.

        Args:
            input_nodes: number of input features.
            hidden_nodes: number of hidden units.
            output_nodes: number of output units.
        """
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Small random weights break symmetry; biases start at zero.
        self.weights_input_to_hidden = np.random.randn(self.input_nodes, self.hidden_nodes) * 0.01
        self.weights_hidden_to_output = np.random.randn(self.hidden_nodes, self.output_nodes) * 0.01
        self.bias_hidden = np.zeros(self.hidden_nodes)
        self.bias_output = np.zeros(self.output_nodes)

    def feedforward(self, X):
        """Run a forward pass.

        Args:
            X: array of shape (batch, input_nodes).

        Returns:
            Network output of shape (batch, output_nodes). The hidden and
            output activations are cached on ``self`` for backpropagation.
        """
        self.hidden_layer = relu(np.dot(X, self.weights_input_to_hidden) + self.bias_hidden)
        self.output_layer = identity_function(np.dot(self.hidden_layer, self.weights_hidden_to_output) + self.bias_output)
        return self.output_layer

    def train(self, X, y, epochs, learning_rate, batch_size):
        """Train with mini-batch gradient descent on MSE, printing the
        mean per-batch loss after each epoch.

        Args:
            X: inputs, shape (num_samples, input_nodes).
            y: targets, shape (num_samples, output_nodes).
            epochs: number of passes over the data.
            learning_rate: gradient step size.
            batch_size: mini-batch size.
        """
        num_samples = X.shape[0]
        for epoch in range(epochs):
            # Reshuffle every epoch; fancy indexing copies, so the caller's
            # arrays are not mutated.
            indices = np.arange(num_samples)
            np.random.shuffle(indices)
            X = X[indices]
            y = y[indices]

            total_loss = 0.0
            num_batches = 0  # count batches actually processed for the epoch average
            for i in range(0, num_samples, batch_size):
                batch_X = X[i:i+batch_size]
                batch_y = y[i:i+batch_size]

                output = self.feedforward(batch_X)
                error = batch_y - output
                total_loss += np.mean(np.square(error))  # mean squared error
                num_batches += 1

                # Backpropagate: deltas for output and hidden layers.
                output_delta = error * identity_derivative(output)
                hidden_error = output_delta.dot(self.weights_hidden_to_output.T)
                hidden_delta = hidden_error * relu_derivative(self.hidden_layer)

                self.weights_hidden_to_output += np.dot(self.hidden_layer.T, output_delta) * learning_rate
                self.weights_input_to_hidden += np.dot(batch_X.T, hidden_delta) * learning_rate
                self.bias_hidden += np.sum(hidden_delta, axis=0) * learning_rate
                # BUG FIX: sum per output unit (axis=0). The previous
                # np.sum(output_delta) pooled the gradient over ALL output
                # units, which is wrong whenever output_nodes > 1.
                self.bias_output += np.sum(output_delta, axis=0) * learning_rate

            # BUG FIX: average over the batches actually run. The previous
            # divisor num_samples // batch_size mis-averaged when batch_size
            # did not divide num_samples, and raised ZeroDivisionError when
            # batch_size > num_samples.
            print(f"Epoch {epoch+1}/{epochs}, Loss: {total_loss / num_batches}")

# Build the network: 2 inputs -> 8 hidden ReLU units -> 1 linear output.
input_nodes, hidden_nodes, output_nodes = 2, 8, 1

neural_network = SimpleNeuralNetwork(input_nodes, hidden_nodes, output_nodes)

# Training data: 1000 random pairs in [0, 1); the target is their sum.
num_samples = 1000
X_train = np.random.rand(num_samples, 2)
y_train = np.sum(X_train, axis=1)

# Hyperparameters for mini-batch gradient descent.
epochs, learning_rate, batch_size = 100, 0.001, 32
neural_network.train(X_train, y_train.reshape(num_samples, 1), epochs, learning_rate, batch_size)

# Sanity-check the trained network on two unseen pairs.
test_input = np.array([[1, 2], [0.3, 0.4]])
predicted_sum = neural_network.feedforward(test_input)

print("Predicted sums:")
print(predicted_sum)