﻿import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from Dataset.MyDataset.Classification.logical_classification_dateset import (
    get_data,
)

# 1. Load the toy classification dataset (features X, binary labels y).
# NOTE(review): the argument to get_data looks like a dataset-variant selector —
# confirm against the dataset module.
X, y = get_data(1)

# Wrap the data as float32 tensors; labels are reshaped to a column vector
# (N, 1) so they match the model's (N, 1) sigmoid output for BCELoss.
X_train_tensor = torch.tensor(X, dtype=torch.float32)
y_train_tensor = torch.tensor(y, dtype=torch.float32).view(-1, 1)


# 2. Model definition
class LogisticRegressionModel(nn.Module):
    """Logistic regression: one linear layer followed by a sigmoid.

    Maps an ``input_dim``-dimensional feature vector to a probability
    in (0, 1) of shape ``(N, 1)``.
    """

    def __init__(self, input_dim):
        super().__init__()
        # Single affine map: input_dim features -> one logit.
        self.linear = nn.Linear(input_dim, 1)

    def forward(self, x):
        logit = self.linear(x)
        return torch.sigmoid(logit)


# Dimensionality of the input features (number of columns of X).
input_dim = X_train_tensor.shape[1]  # 2
model = LogisticRegressionModel(input_dim)

# 3. Define the loss function and optimizer.
criterion = nn.BCELoss()  # binary cross-entropy; expects sigmoid outputs in (0, 1)
# NOTE: the author originally tried torch.optim.SGD(lr=0.03) but found it
# converged too poorly on this problem, so Adam is used instead.
optimizer = torch.optim.Adam(model.parameters(), lr=0.03)


# 4. Train the model, refreshing a live plot of the decision boundary.
plt.ion()
num_epochs = 4000
for epoch in range(num_epochs):

    # Forward pass: predicted probabilities and BCE loss.
    outputs = model(X_train_tensor)
    loss = criterion(outputs, y_train_tensor)

    # Backward pass and parameter update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Redraw the figure every 10 epochs.
    if epoch % 10 == 0:
        plt.cla()
        plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="red", label="Class 0")
        plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="blue", label="Class 1")
        plt.title("Logical Classification Dataset")
        plt.xlabel("Feature 1")
        plt.ylabel("Feature 2")

        # Extract the current weights and bias of the single linear layer.
        [w, b] = model.linear.parameters()
        w1, w2 = w[0][0].item(), w[0][1].item()
        b = b.item()
        # Decision boundary: w1*x1 + w2*x2 + b = 0  =>  x2 = -(w1*x1 + b) / w2.
        # NOTE(review): assumes w2 != 0; a (near-)vertical boundary would
        # divide by zero — acceptable for this demo, but worth knowing.
        x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        x2_min, x2_max = -(w1 * x1_min + b) / w2, -(w1 * x1_max + b) / w2
        # Draw the fitted boundary line.
        plt.plot([x1_min, x1_max], [x2_min, x2_max], "green", label="Decision Boundary")
        # Fix: build the legend AFTER all labelled artists are plotted;
        # the original called plt.legend() before plotting the boundary,
        # so its "Decision Boundary" label never appeared in the legend.
        plt.legend()
        plt.text(
            1,
            -2,
            "epoch=%d,loss=%.4f, k=%.2f, b=%.2f"
            % (epoch, loss.item(), -(w1 / w2), -(b / w2)),
        )
        plt.pause(0.01)

plt.ioff()
plt.show()
