import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import matplotlib
import matplotlib.pyplot as plt

matplotlib.use('TkAgg')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Whether host->device copies may return before the transfer completes.
# True (CUDA available): .to(device, non_blocking=True) lets the host thread
# continue without waiting for the copy; pairs with pin_memory=True below.
# False (CPU only): the flag would be a no-op for CPU tensors anyway.
non_blocking = torch.cuda.is_available()


class DiabetesDataset(Dataset):
    """Map-style dataset over a comma-separated numeric file.

    Every column except the last is treated as a feature; the last
    column is the label. Both are exposed as float32 tensors.
    """

    def __init__(self, filepath):
        raw = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        features, labels = raw[:, :-1], raw[:, [-1]]
        self.x_data = torch.from_numpy(features)
        self.y_data = torch.from_numpy(labels)
        self.len = raw.shape[0]

    def __getitem__(self, index):
        """Return the (features, label) pair at *index*."""
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        """Number of rows in the source file."""
        return self.len


dataset = DiabetesDataset("../data/Diabetes/diabetes.csv.gz")
# pin_memory=True places fetched batches in page-locked (pinned) host memory
# rather than ordinary pageable memory:
# - pinned pages stay resident in physical RAM (the OS never swaps them out),
#   so the GPU's DMA engine can read them directly and host->device copies
#   are faster;
# - together with non_blocking=True in .to(device, ...) above, those copies
#   can run asynchronously with host code.
train_loader = DataLoader(dataset, batch_size=32, shuffle=True, pin_memory=True)


class Model(nn.Module):
    """Three-layer MLP (8 -> 5 -> 3 -> 1) that outputs raw logits.

    No sigmoid is applied to the final layer; the caller is expected to
    pair the output with a logit-based loss or apply sigmoid itself.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(8, 5)
        self.linear2 = nn.Linear(5, 3)
        self.linear3 = nn.Linear(3, 1)
        self.act = nn.Sigmoid()

    def forward(self, x):
        # Sigmoid activation between hidden layers only; the last linear
        # layer is left un-activated so it produces logits.
        for hidden in (self.linear1, self.linear2):
            x = self.act(hidden(x))
        return self.linear3(x)


model = Model().to(device)

# The model emits raw logits, so use BCEWithLogitsLoss (sigmoid + BCE fused
# in one op, numerically stabler than Sigmoid followed by BCELoss).
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

epochs = 100
counts = 0        # total number of optimization steps taken so far
train_loss = []   # per-batch loss history, plotted after training

model.train()
for epoch in range(epochs):
    for i, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.to(device, non_blocking=non_blocking)
        labels = labels.to(device, non_blocking=non_blocking)
        logits = model(inputs)
        loss = criterion(logits, labels)
        train_loss.append(loss.item())
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        counts += 1
        print("count: {}, loss: {}".format(counts, loss.item()))

model.eval()
with torch.no_grad():
    x_test = torch.Tensor(
        [[0.547912, -0.122243, 0.717196, 0.394736, -0.811645, 0.951245, 0.522279, 0.572129]]
    ).to(device)
    # Fix: model(x_test) is a raw logit, not a probability — apply sigmoid
    # before reporting it as the positive-class prediction.
    y_prob = torch.sigmoid(model(x_test))
    print("y_test =", y_prob.item())

# Plot the full per-step loss curve.
plt.figure(figsize=(10, 5))
plt.plot(range(counts), train_loss)
plt.grid()
plt.show()
