﻿import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt


# 定义一个简单的感知机模型
# A single-neuron binary classifier: one linear layer plus a sigmoid.
class Perceptron(nn.Module):
    """Perceptron mapping 2 input features to one probability in (0, 1)."""

    def __init__(self):
        super().__init__()
        # Affine map from the two input features to a single logit.
        # NOTE: attribute name `linear` is read elsewhere in the file
        # (model.linear.parameters()) — keep it.
        self.linear = nn.Linear(2, 1)

    def forward(self, x):
        """Return sigmoid(Wx + b), squashing the logit into [0, 1]."""
        logit = self.linear(x)
        return torch.sigmoid(logit)


# Build the model.
model = Perceptron()

# Loss and optimizer: binary cross-entropy with plain SGD.
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Logical-AND dataset: the four possible input pairs and their AND result.
X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
y = np.array([0, 0, 0, 1])

# Tensor views of the dataset for training; labels as a column vector.
inputs = torch.tensor(X, dtype=torch.float32)
labels = torch.tensor(y, dtype=torch.float32).reshape(-1, 1)

# Train the perceptron, redrawing the data points and the current
# decision boundary every 10 epochs (interactive matplotlib mode).
plt.ion()
epochs = 1000
for epoch in range(epochs):
    # Forward pass.
    outputs = model(inputs)
    loss = criterion(outputs, labels)

    # Backward pass and parameter update.
    optimizer.zero_grad()  # clear accumulated gradients
    loss.backward()  # compute gradients
    optimizer.step()  # apply the SGD update

    if epoch % 10 == 0:
        plt.cla()
        plt.scatter(X[y == 0, 0], X[y == 0, 1], color="red", label="0")
        plt.scatter(X[y == 1, 0], X[y == 1, 1], color="blue", label="1")
        plt.title("And Classification")
        plt.xlabel("x1")
        plt.ylabel("x2")

        # Current weights and bias of the linear layer.
        [w, b] = model.linear.parameters()
        w1, w2 = w[0][0].item(), w[0][1].item()
        b = b.item()
        # Decision boundary: w1*x1 + w2*x2 + b = 0  =>  x2 = -(w1*x1 + b)/w2.
        x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        # Guard against a (near-)vertical boundary: w2 == 0.0 would raise
        # ZeroDivisionError on the float division below.
        if abs(w2) > 1e-12:
            x2_min = -(w1 * x1_min + b) / w2
            x2_max = -(w1 * x1_max + b) / w2
            plt.plot(
                [x1_min, x1_max],
                [x2_min, x2_max],
                "green",
                label="Decision Boundary",
            )
            plt.text(
                0,
                2,
                "epoch=%d,loss=%.4f, k=%.2f, b=%.2f"
                % (epoch, loss.item(), -(w1 / w2), -(b / w2)),
            )
        # BUGFIX: legend() used to run before the boundary line was plotted,
        # so its "Decision Boundary" label never appeared in the legend.
        # Draw the legend only after all labeled artists exist.
        plt.legend()
        plt.pause(0.01)

plt.ioff()
plt.show()
