import math
import random
from typing import List

# Sample output of one run:
# 0.0031903837685915365    <- [0, 0]
# 0.4838697933466922       <- [0, 1]
# 0.9965781051857916       <- [1, 0]
# 0.48391468429372725      <- [1, 1]
# Because the output neuron uses a sigmoid activation, predictions always lie
# in (0, 1); thresholding at 0.5 maps each prediction to class 0 or class 1.
# In this run, [0, 0] -> 0.003 and [1, 1] -> 0.484 fall below 0.5 (class 0,
# correct), and [1, 0] -> 0.997 is above 0.5 (class 1, correct).  However,
# [0, 1] -> 0.484 is BELOW 0.5, so this particular run misclassifies it: the
# network did not fully learn XOR here.  Training can stall in a local
# minimum, and because the weights are randomly initialized, results vary
# from run to run.

def sigmoid(x: float) -> float:
    """Logistic sigmoid, 1 / (1 + e**-x), mapping any real x into (0, 1).

    Fixes two defects in the original:
    - it hard-coded Euler's number as 2.71828, which subtly skews every
      activation and gradient; ``math.exp`` is exact to float precision;
    - ``pow(e, -x)`` overflows for large negative x.  Branching on the sign
      of x means exp() is only ever called on a non-positive argument, so it
      can underflow to 0.0 but never raise OverflowError.
    """
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    z = math.exp(x)  # x < 0, so this is safe (at worst underflows to 0.0)
    return z / (1.0 + z)

def derivative_sigmoid(x: float) -> float:
    """Derivative of the sigmoid at x, via the identity s'(x) = s(x)*(1 - s(x)).

    Evaluates sigmoid(x) once instead of twice (the original called it twice
    per invocation, doubling the cost of this function inside the training
    hot loop where it is called many times per sample).
    """
    s = sigmoid(x)
    return s * (1 - s)

class Neuron:
    """A single sigmoid neuron computing sigmoid(w . x + b)."""

    def __init__(self, weights: List[float], bias: float):
        """Store the neuron's weight vector and scalar bias."""
        self.weights = weights
        self.bias = bias

    def feedforward(self, inputs: List[float]) -> float:
        """Return the sigmoid activation of the weighted input sum.

        ``inputs`` is expected to have the same length as ``self.weights``
        (extra elements on either side are silently ignored by ``zip``).
        """
        # Generator expression instead of an intermediate list, and the
        # loop variable no longer shadows the builtin ``input``.
        total = sum(w * x for w, x in zip(self.weights, inputs)) + self.bias
        return sigmoid(total)

class NeuralNetwork:
    """A fixed 2-2-1 multilayer perceptron: two inputs, one hidden layer of
    two sigmoid neurons (h1, h2), and one sigmoid output neuron (o1).

    Trained by per-sample (online) gradient descent on the squared error,
    with all gradients derived by hand via the chain rule.
    """

    def __init__(self):
        # Hidden-layer neurons: 2 weights + 1 bias each, drawn uniformly
        # from [-1, 1).
        self.h1 = Neuron([random.uniform(-1, 1) for _ in range(2)], random.uniform(-1, 1))
        self.h2 = Neuron([random.uniform(-1, 1) for _ in range(2)], random.uniform(-1, 1))

        # Output-layer neuron: takes the two hidden activations as input.
        self.o1 = Neuron([random.uniform(-1, 1) for _ in range(2)], random.uniform(-1, 1))

    def feedforward(self, inputs: List[float]) -> float:
        """Propagate a 2-element input through the network and return the
        output neuron's activation (a float in (0, 1))."""
        # Hidden-layer activations.
        h1_output = self.h1.feedforward(inputs)
        h2_output = self.h2.feedforward(inputs)

        # Output-layer activation on top of the hidden activations.
        o1_output = self.o1.feedforward([h1_output, h2_output])

        return o1_output

    def train(self, inputs: List[List[float]], expected_outputs: List[float], learning_rate: float, epochs: int):
        """Train with online gradient descent on (output - expected)**2.

        For each epoch, iterates over every (input, target) pair, updates
        all weights and biases immediately after that sample, and prints
        the epoch's mean squared error.
        """
        for epoch in range(epochs):
            epoch_error = 0
            for input, expected_output in zip(inputs, expected_outputs):
                # Forward pass (recomputed here so intermediate activations
                # are available for the gradient formulas below).
                h1_output = self.h1.feedforward(input)
                h2_output = self.h2.feedforward(input)
                output = self.o1.feedforward([h1_output, h2_output])

                # Squared-error loss for this sample.
                error = output - expected_output
                epoch_error += error ** 2

                # Backward pass: d(error^2)/d(output) = 2 * error.
                d_error_d_output = 2 * error

                # Chain rule through the output neuron to h1's output, then
                # through h1 to each of its weights.
                d_output_d_h1output = derivative_sigmoid(sum([weight * i for weight, i in zip(self.o1.weights, [h1_output, h2_output])]) + self.o1.bias) * self.o1.weights[0]
                d_h1output_d_h1weights = [derivative_sigmoid(sum([weight * i for weight, i in zip(self.h1.weights, input)]) + self.h1.bias) * i for i in input]
                d_error_d_h1weights = [d_error_d_output * d_output_d_h1output * d_h1output_d_h1weight for d_h1output_d_h1weight in d_h1output_d_h1weights]

                # Same chain for h2.
                d_output_d_h2output = derivative_sigmoid(sum([weight * i for weight, i in zip(self.o1.weights, [h1_output, h2_output])]) + self.o1.bias) * self.o1.weights[1]
                d_h2output_d_h2weights = [derivative_sigmoid(sum([weight * i for weight, i in zip(self.h2.weights, input)]) + self.h2.bias) * i for i in input]
                d_error_d_h2weights = [d_error_d_output * d_output_d_h2output * d_h2output_d_h2weight for d_h2output_d_h2weight in d_h2output_d_h2weights]

                # Output-layer weight gradients: dE/dw = dE/dout * sigma'(z) * h_i.
                d_error_d_o1weights = [d_error_d_output * derivative_sigmoid(sum([weight * i for weight, i in zip(self.o1.weights, [h1_output,h2_output])]) + self.o1.bias) * i for i in [h1_output,h2_output]]

                # Apply the weight updates (plain gradient descent step).
                for i in range(len(self.h1.weights)):
                    self.h1.weights[i] -= learning_rate * d_error_d_h1weights[i]
                    self.h2.weights[i] -= learning_rate * d_error_d_h2weights[i]

                for i in range(len(self.o1.weights)):
                    self.o1.weights[i] -= learning_rate * d_error_d_o1weights[i]

                # NOTE(review): the weights were already updated above, so the
                # pre-activation sums recomputed in these three bias updates use
                # the NEW weights while d_output_d_h*output was computed with
                # the old ones.  The bias gradients are therefore inconsistent
                # with the weight gradients; computing all gradients before
                # applying any update would be the textbook-correct order.
                # TODO confirm — this may explain runs that stall near 0.5.
                self.h1.bias -= learning_rate * d_error_d_output * d_output_d_h1output * derivative_sigmoid(sum([weight * i for weight, i in zip(self.h1.weights, input)]) + self.h1.bias)
                self.h2.bias -= learning_rate * d_error_d_output * d_output_d_h2output * derivative_sigmoid(sum([weight * i for weight, i in zip(self.h2.weights, input)]) + self.h2.bias)
                self.o1.bias -= learning_rate * d_error_d_output * derivative_sigmoid(sum([weight * i for weight, i in zip(self.o1.weights, [h1_output,h2_output])]) + self.o1.bias)

            print(f'Epoch {epoch}: Mean Squared Error = {epoch_error / len(inputs)}')

# Train a fresh network on the XOR truth table, then print its prediction
# for each of the four input patterns.  Results vary between runs because
# the weights are randomly initialized.
network = NeuralNetwork()
inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
expected_outputs = [0, 1, 1, 0]  # XOR targets
learning_rate = 0.5
epochs = 100000
network.train(inputs, expected_outputs, learning_rate, epochs)
for sample in inputs:  # renamed from `input`, which shadowed the builtin
    print(network.feedforward(sample))
