import numpy as np
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader


# 定义数据集
# Dataset backed by a CSV file holding 8 feature columns plus 1 label column.
class MyDataset(Dataset):
    def __init__(self):
        """Load the CSV once and split it into feature and label tensors."""
        raw = np.loadtxt("./data/8x_1y.csv", delimiter=",", dtype=np.float32)
        # Every column except the last holds the input features (x).
        self.x_data = torch.from_numpy(raw[:, :-1])
        # The last column is the label; the list index keeps it 2-D (N, 1).
        self.y_data = torch.from_numpy(raw[:, [-1]])
        # Total number of samples in the dataset.
        self.len = self.x_data.shape[0]

    def __getitem__(self, index):
        """Return the (features, label) pair at *index*."""
        return self.x_data[index], self.y_data[index]

    def __len__(self) -> int:
        """Return the total number of samples."""
        return self.len


# 定义多维模型
# Three-layer fully connected (FC) binary classifier: 8 -> 6 -> 4 -> 1.
class NeuralNetwork(nn.Module):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Fully connected layers shrink the feature dimension step by step.
        self.fc1 = nn.Linear(8, 6)
        self.fc2 = nn.Linear(6, 4)
        self.fc3 = nn.Linear(4, 1)
        # Shared non-linearity applied after each hidden layer.
        self.activation = nn.ReLU()

    def forward(self, x):
        """Map an 8-feature input to a probability in (0, 1)."""
        hidden = self.activation(self.fc1(x))
        hidden = self.activation(self.fc2(hidden))
        # Sigmoid squashes the last layer's output into (0, 1), so a
        # downstream BCE loss never takes log(0) from a raw ReLU output.
        return torch.sigmoid(self.fc3(hidden))


if __name__ == '__main__':
    # Build the dataset from the CSV file on disk.
    dataset = MyDataset()
    # DataLoader groups 3 samples per mini-batch; shuffle=True reshuffles the
    # data each epoch so every epoch sees differently composed mini-batches;
    # num_workers is the number of subprocesses used to load data.
    data_loader = DataLoader(dataset, batch_size=3, shuffle=True, num_workers=2)

    # Initialize the model.
    model = NeuralNetwork()

    # Binary cross-entropy loss matches the model's sigmoid output.
    criterion = nn.BCELoss()
    # Rprop optimizer over all model parameters.
    optimizer = torch.optim.Rprop(model.parameters(), lr=0.1)

    # Training loop.
    model.train()
    for epoch in range(20):
        for i, (x, y) in enumerate(data_loader):
            # Forward pass: predicted probabilities for this batch.
            y_pred = model(x)
            # Loss against the ground-truth labels.
            loss = criterion(y_pred, y)
            # Clear gradients left over from the previous step.
            optimizer.zero_grad()
            # Backward pass.
            loss.backward()
            # Gradient-based parameter update.
            optimizer.step()
            # Report training progress.
            print(f"Epoch: {epoch}, Batch: {i}, Loss: {loss.item()}")

    # Run one sample through the trained model. eval() switches off any
    # train-only behavior and no_grad() disables gradient tracking, both of
    # which the original script forgot for inference.
    model.eval()
    with torch.no_grad():
        y_pred = model(torch.Tensor([-0.99, -0.15, 0.08, -0.41, 0, -0.51, -0.27, -0.67]))

    print(y_pred.item())
