import torch
from torch import nn
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

# Load the XOR dataset; the first row is a header and is skipped.
data = np.loadtxt('../data/xor_dataset.csv', delimiter=',', skiprows=1)

x_all = data[:, :2]  # features, shape (N, 2)
y_all = data[:, 2:]  # labels, shape (N, 1)

# Shuffle once with a fixed seed, then take the first 80% as training data.
ratio = 0.8
split = int(len(x_all) * ratio)

np.random.seed(0)
idx = np.random.permutation(len(x_all))
train_idx, test_idx = idx[:split], idx[split:]
x_train, y_train = x_all[train_idx], y_all[train_idx]
x_test, y_test = x_all[test_idx], y_all[test_idx]

# Maps activation-name strings to the corresponding element-wise torch function.
torch_activation_dict = {
    'identity': lambda x: x,
    'sigmoid': torch.sigmoid,
    'relu': torch.relu,
    'tanh': torch.tanh,
}


# Custom PyTorch modules conventionally subclass nn.Module.
class MLP_torch(nn.Module):
    """Multi-layer perceptron built from `nn.Linear` layers.

    All hidden layers share one activation; the output layer may use a
    different one (e.g. 'sigmoid' for binary classification).
    """

    def __init__(
            self,
            layer_sizes,  # list with the size of every layer, e.g. [2, 4, 1]
            use_bias=True,  # whether each Linear layer carries a bias term
            activation='relu',  # hidden-layer activation name (key of torch_activation_dict)
            out_activation='identity'  # output-layer activation name
    ):
        super().__init__()  # initialize the nn.Module machinery
        self.activation = torch_activation_dict[activation]
        self.out_activation = torch_activation_dict[out_activation]
        self.layers = nn.ModuleList()
        num_in = layer_sizes[0]
        for num_out in layer_sizes[1:]:
            layer = nn.Linear(num_in, num_out, bias=use_bias)
            nn.init.normal_(layer.weight, mean=0.0, std=1.0)
            # BUG FIX: the original initialized `bias` unconditionally, but
            # with use_bias=False `nn.Linear.bias` is None and
            # nn.init.normal_ raises. Only initialize it when it exists.
            if use_bias:
                nn.init.normal_(layer.bias, mean=0.0, std=1.0)
            self.layers.append(layer)
            num_in = num_out

    def forward(self, x):
        """Forward pass: Linear + activation per hidden layer, then the
        final Linear followed by the output activation. PyTorch handles
        batch dimensions automatically."""
        for layer in self.layers[:-1]:
            x = self.activation(layer(x))
        return self.out_activation(self.layers[-1](x))


# Hyperparameters
num_epochs = 1000
learning_rate = 0.1
batch_size = 128
eps = 1e-7  # numerical floor inside log() to avoid log(0)
torch.manual_seed(0)

mlp = MLP_torch(layer_sizes=[2, 4, 1], use_bias=True, activation='relu', out_activation='sigmoid')
opt = torch.optim.SGD(mlp.parameters(), lr=learning_rate)

# Per-epoch history: train loss/accuracy and test loss/accuracy
losses      = []
train_accs  = []
test_losses = []
test_accs   = []

for epoch in tqdm(range(num_epochs)):
    batch_losses = []
    batch_accs   = []

    # ---- training pass over mini-batches ----
    mlp.train()
    for start in range(0, len(x_train), batch_size):
        end = min(len(x_train), start + batch_size)
        x = torch.tensor(x_train[start:end], dtype=torch.float32)
        y = torch.tensor(y_train[start:end], dtype=torch.float32)

        y_pred = mlp(x)
        # Manual binary cross-entropy (y_pred comes from a sigmoid output)
        train_loss = torch.mean(
            -y * torch.log(y_pred + eps) - (1 - y) * torch.log(1 - y_pred + eps)
        )

        opt.zero_grad()
        train_loss.backward()
        opt.step()

        # Record per-batch training loss and accuracy
        batch_losses.append(train_loss.detach().numpy())
        batch_accs.append(torch.mean((torch.round(y_pred) == y).float()).detach().numpy())

    losses.append(np.mean(batch_losses))
    train_accs.append(np.mean(batch_accs))

    # ---- evaluation on the held-out set ----
    mlp.eval()
    with torch.inference_mode():
        x = torch.tensor(x_test, dtype=torch.float32)
        y = torch.tensor(y_test, dtype=torch.float32)
        y_pred = mlp(x)

        test_loss = torch.mean(
            -y * torch.log(y_pred + eps) - (1 - y) * torch.log(1 - y_pred + eps)
        )
        test_acc = torch.mean((torch.round(y_pred) == y).float())

        test_losses.append(test_loss.numpy())
        test_accs.append(test_acc.numpy())

# -------------------------------------------------
# 2. Plot loss and accuracy curves side by side
# -------------------------------------------------
plt.figure(figsize=(14, 5))

# (title, y-label, [(series, legend label), ...], optional y-limits)
panels = [
    ('Training & Test Loss', 'Loss',
     [(losses, 'Train Loss'), (test_losses, 'Test Loss')], None),
    ('Training & Test Accuracy', 'Accuracy',
     [(train_accs, 'Train Acc'), (test_accs, 'Test Acc')], (0, 1.05)),
]

for pos, (title, ylabel, series, ylim) in enumerate(panels, start=1):
    plt.subplot(1, 2, pos)
    for values, label in series:
        plt.plot(values, label=label)
    plt.xlabel('Epoch')
    plt.ylabel(ylabel)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.title(title)
    plt.legend()

plt.tight_layout()
plt.show()