import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib
import matplotlib.pyplot as plt

matplotlib.use('TkAgg')

# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the diabetes dataset: each row is 8 feature columns followed by a
# binary label in the last column (8 inferred from linear1 below; binary
# inferred from the BCELoss used for training). float32 matches torch's
# default floating dtype so no conversion is needed after from_numpy.
xy = np.loadtxt('../data/Diabetes/diabetes.csv.gz', delimiter=',', dtype=np.float32)
# All rows, every column except the last -> feature matrix of shape (N, 8).
x_data = torch.from_numpy(xy[:, :-1]).to(device)
# All rows, last column only; indexing with the list [-1] keeps the result
# two-dimensional, shape (N, 1), as BCELoss expects.
y_data = torch.from_numpy(xy[:, [-1]]).to(device)


# Model definition
class Model(nn.Module):
    """Fully connected binary classifier: 8 -> 6 -> 4 -> 2 -> 1.

    Every layer is followed by a sigmoid, so the final output is a single
    value in (0, 1) suitable for BCELoss.
    """

    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = nn.Linear(8, 6)
        self.linear2 = nn.Linear(6, 4)
        self.linear3 = nn.Linear(4, 2)
        self.linear4 = nn.Linear(2, 1)
        # One shared (stateless) sigmoid module reused after each layer.
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Pipe the input through each linear layer + sigmoid in turn.
        out = x
        for layer in (self.linear1, self.linear2, self.linear3, self.linear4):
            out = self.sigmoid(layer(out))
        return out


# Model instance, moved to the chosen device.
model = Model().to(device)
# Binary cross-entropy loss; pairs with the sigmoid output of the model.
criterion = nn.BCELoss()
# Plain stochastic gradient descent over all model parameters.
optimizer = optim.SGD(model.parameters(), lr=0.1)
# Number of training epochs; training is full-batch (the whole dataset
# per step), so one epoch == one optimizer step.
epochs = 10000
train_loss = []  # per-epoch loss history, plotted after training
for epoch in range(epochs):
    # Forward pass over the entire dataset at once.
    y_pred = model(x_data)
    # Loss against the ground-truth labels.
    loss = criterion(y_pred, y_data)
    train_loss.append(loss.item())
    print("epoch: {}, loss: {}".format(epoch, loss.item()))
    optimizer.zero_grad()  # clear gradients left over from the previous step
    loss.backward()        # back-propagate
    optimizer.step()       # apply the parameter update

# Dump the trained architecture and the learned weights/biases of every
# layer for inspection. (Fixed: the original printed linear4.weight twice
# — a copy-paste duplicate.)
print(model)
print(model.linear1.weight.tolist())
print(model.linear1.bias.tolist())
print(model.linear2.weight.tolist())
print(model.linear2.bias.tolist())
print(model.linear3.weight.tolist())
print(model.linear3.bias.tolist())
print(model.linear4.weight.tolist())
print(model.linear4.bias.tolist())

# Plot the training-loss curve (one point per epoch).
plt.figure(figsize=(10, 5))
plt.plot(range(epochs), train_loss)
plt.grid()
plt.show()

# Statistics: total count of learnable parameter elements in the model.
total = sum(p.numel() for p in model.parameters())
print("Total parameters: {}".format(total))
# Print each parameter's name and shape. NOTE(review): the original comment
# said "custom initialization", but this loop only inspects — it does not
# re-initialize anything.
for name, p in model.named_parameters():
    print("Parameter name: {}, shape: {}".format(name, p.shape))

model.eval()
with torch.no_grad():
    # Five hand-picked feature vectors run through the trained model; each
    # prints the model's sigmoid output (a value in (0, 1)) for that sample.
    samples = [
        [0.547912, -0.122243, 0.717196, 0.394736, -0.811645, 0.951245, 0.522279, 0.572129],
        [-0.743773, -0.099228, -0.258404, 0.853530, 0.287730, 0.645523, -0.113172, -0.545523],
        [0.109170, -0.872366, 0.655262, 0.263329, 0.516176, -0.290948, 0.941396, 0.786242],
        [0.556767, -0.610723, -0.066558, -0.912392, -0.691421, 0.366098, 0.489524, 0.935019],
        [-0.348349, -0.259081, -0.060888, -0.621057, -0.740157, -0.048590, -0.546181, 0.339628],
    ]
    for idx, features in enumerate(samples, start=1):
        # Wrap in an outer list so the input is a (1, 8) batch of one sample.
        x_test = torch.Tensor([features]).to(device)
        y_test = model(x_test)
        print("y_test{} =".format(idx), y_test.item())
