﻿import numpy as np
import matplotlib.pyplot as plt


# Activation function and its derivative
def sigmoid(x):
    """Element-wise logistic activation: 1 / (1 + e^(-x)), mapping reals into (0, 1)."""
    denom = 1 + np.exp(-x)
    return 1 / denom


def sigmoid_derivative(fx):
    """Derivative of the sigmoid, expressed in terms of its output fx = sigmoid(x)."""
    complement = 1 - fx
    return fx * complement


# Initialize parameters — fixed RNG seed so every run is reproducible
np.random.seed(42)
input_size = 2  # input layer size (two input nodes: x1, x2)
output_size = 1  # output layer size (single prediction)

# Weights drawn uniformly from [-1, 1); bias starts at zero
W = np.random.uniform(-1, 1, (input_size, output_size))
b = np.zeros((1, output_size))

# Input data (truth table of the logical AND operation)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# Target data (output column of logical AND)
y = np.array([[0], [0], [0], [1]])

# Training hyperparameters
learning_rate = 0.1
epochs = 10000

plt.ion()  # interactive mode so the figure refreshes while training runs
for epoch in range(epochs):
    # Forward pass: single linear layer followed by a sigmoid
    final_input = np.dot(X, W) + b
    predicted_output = sigmoid(final_input)

    # Loss (mean squared error) — used only for display
    loss = np.mean((y - predicted_output) ** 2)

    # Error signal
    error = y - predicted_output

    # Backward pass: gradient of MSE through the sigmoid output
    output_delta = error * sigmoid_derivative(predicted_output)

    # Gradient step on weights and bias
    W += X.T.dot(output_delta) * learning_rate
    b += np.sum(output_delta, axis=0, keepdims=True) * learning_rate

    # Redraw the data points and the current decision boundary
    if epoch % 100 == 0:
        plt.cla()
        plt.scatter(
            X[y.flatten() == 0, 0], X[y.flatten() == 0, 1], color="red", label="label=0"
        )
        plt.scatter(
            X[y.flatten() == 1, 0],
            X[y.flatten() == 1, 1],
            color="blue",
            label="label=1",
        )
        plt.title("Logical Classification Dataset")
        plt.xlabel("x1")
        plt.ylabel("x2")

        # Current weights and bias
        w1, w2 = W[0, 0], W[1, 0]
        b_value = b[0, 0]

        # Decision boundary: w1*x1 + w2*x2 + b = 0  =>  x2 = -(w1*x1 + b) / w2
        x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        # Guard against division by a (near-)zero w2, which would yield inf/NaN
        if abs(w2) > 1e-12:
            x2_min = -(w1 * x1_min + b_value) / w2
            x2_max = -(w1 * x1_max + b_value) / w2
            plt.plot(
                [x1_min, x1_max], [x2_min, x2_max], "green", label="Decision Boundary"
            )

        # BUG FIX: legend() must come AFTER the boundary plot, otherwise the
        # "Decision Boundary" entry is missing from the legend
        plt.legend()

        # Show training progress on the figure
        plt.text(0.5, -0.5, f"epoch={epoch}, loss={loss:.4f}", fontsize=9)
        plt.pause(0.1)  # shorter pause: 100 frames at 1 s each was ~100 s of sleep

plt.ioff()
plt.show()  # keep the final figure open once training is done