import numpy as np


# Activation function and its derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of its *output*.

    Note: `x` here is the already-activated value s = sigmoid(z), not z
    itself; d/dz sigmoid(z) = s * (1 - s). Callers pass the cached
    activation (e.g. `self.a`), not the pre-activation.
    """
    activated = x
    return activated * (1 - activated)


# Loss function and its derivative (binary cross-entropy is used here)
def cross_entropy_loss(y_true, y_pred):
    """Mean binary cross-entropy between labels and predicted probabilities.

    Predictions are clipped away from exactly 0 and 1 so np.log never
    receives 0, which would otherwise yield inf/NaN losses once the
    model saturates.
    """
    eps = 1e-12
    y_pred = np.clip(y_pred, eps, 1 - eps)
    return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))


def cross_entropy_loss_derivative(y_true, y_pred):
    """Element-wise dL/dy_pred of the binary cross-entropy (not averaged).

    Predictions are clipped away from exactly 0 and 1 to avoid division
    by zero when the model saturates.
    """
    eps = 1e-12
    y_pred = np.clip(y_pred, eps, 1 - eps)
    return -(y_true / y_pred) + ((1 - y_true) / (1 - y_pred))


# Single-layer neural network class
class SimpleNeuralNetwork:
    """Single-layer network: sigmoid(X @ W + b), trained with gradient descent."""

    def __init__(self, input_size, output_size):
        # Standard-normal initial weights; zero biases.
        self.weights = np.random.randn(input_size, output_size)
        self.bias = np.zeros((1, output_size))

    def forward(self, X):
        """Forward pass; caches pre-activation z and activation a for backward()."""
        self.z = np.dot(X, self.weights) + self.bias
        self.a = sigmoid(self.z)
        return self.a

    def backward(self, X, y_true, y_pred, learning_rate=0.1):
        """Apply one gradient-descent step on the cross-entropy loss.

        Args:
            X: input batch, shape (n_samples, input_size).
            y_true: labels, shape (n_samples, output_size).
            y_pred: the output of forward(X).
            learning_rate: step size. Previously this was read from a
                module-level global; it is now an explicit parameter with
                the same default (0.1), so the class works standalone.
        """
        # For sigmoid output + binary cross-entropy the gradient chain
        # cross_entropy_loss_derivative(...) * sigmoid_derivative(a)
        # simplifies exactly to dL/dz = y_pred - y_true. Using the
        # simplified form avoids the division by y_pred (and hence
        # divide-by-zero when predictions saturate at 0 or 1).
        dz = y_pred - y_true

        # Gradients w.r.t. weights and bias (summed over the batch,
        # matching the original behavior).
        dw = np.dot(X.T, dz)
        db = np.sum(dz, axis=0, keepdims=True)

        # Gradient-descent update.
        self.weights -= learning_rate * dw
        self.bias -= learning_rate * db


# Hyperparameters.
learning_rate = 0.1
epochs = 1000

# Create the network instance.
input_size = 2  # number of input features
output_size = 1  # single output unit (binary classification)
nn = SimpleNeuralNetwork(input_size, output_size)

# Toy training set: the XOR truth table.
X_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_train = np.array([[0], [1], [1], [0]])  # labels
# NOTE(review): XOR is not linearly separable, so a single sigmoid layer
# cannot fit these labels — the loss will plateau; presumably this data
# is only for demonstration. Weights are also unseeded (np.random.randn
# in __init__), so runs are not reproducible.

# Train the network.
for epoch in range(epochs):
    # Forward pass.
    predictions = nn.forward(X_train)

    # Compute the loss (monitoring only; backward() does not use it).
    loss = cross_entropy_loss(y_train, predictions)

    # Backward pass: one gradient-descent update.
    nn.backward(X_train, y_train, predictions)

    # Print progress every 100 epochs.
    if epoch % 100 == 0:
        print(f'Epoch {epoch}, Loss: {loss}')

# Evaluate the network (same XOR inputs, reordered).
X_test = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
print("Predictions:")
print(nn.forward(X_test))