import os
import sys
import torch.utils
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from data_loader import iris_dataloader

# 初始化神经网络
class NN(nn.Module):
    def __init__(self, input_size, hidden_size1, hidden_size2, output_size):
        """
        初始化神经网络
        :param input_size: 输入层大小
        :param hidden_size: 隐藏层大小
        :param output_size: 输出层大小
        """
        super().__init__()
        self.layer1 = nn.Linear(input_size, hidden_size1)
        self.layer2 = nn.Linear(hidden_size1, hidden_size2)
        self.layer3 = nn.Linear(hidden_size2, output_size)
        
    def forward(self, x):
        """
        前向传播
        :param x: 输入数据
        :return: 输出数据
        """
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x
    
# Select the compute device: first CUDA GPU if available, else CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Dataset split: 70% train / 20% test / remainder (~10%) validation.
# The validation size is computed as the remainder so the three parts
# always sum exactly to len(dataset) despite int() truncation.
dataset = iris_dataloader('iris.csv')
train_size = int(0.7 * len(dataset))
test_size = int(0.2 * len(dataset))
val_size = len(dataset) - train_size - test_size
train_dataset, test_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, test_size, val_size])

train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

# Report subset sizes from the datasets themselves: len(loader) counts
# batches, so len(train_loader) * 16 would overstate the training-set
# size whenever the last batch is smaller than 16.
print(f'训练集大小: {len(train_dataset)}, 测试集大小: {len(test_dataset)}, 验证集大小: {len(val_dataset)}')

# Evaluation helper: compute classification accuracy over a dataloader.
def inference(model, dataloader, device):
    """
    Evaluate the classification accuracy of `model` on `dataloader`.

    :param model: an nn.Module producing per-class logits
    :param dataloader: iterable yielding (inputs, labels) batches
    :param device: torch.device to move inputs/labels onto
    :return: accuracy in [0, 1]; 0.0 if the dataloader is empty
    """
    model.eval()  # switch off training-only behavior (dropout, batch-norm updates)
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for datas, labels in dataloader:
            outputs = model(datas.to(device))
            # argmax over the class dimension yields the predicted class index
            # (clearer than torch.max(...)[1]).
            predicted = torch.argmax(outputs, dim=1)
            correct += (predicted == labels.to(device)).sum().item()
            total += labels.size(0)
    # Guard against division by zero when the dataloader yields no batches.
    return correct / total if total else 0.0

def main(lr=0.005, epochs=100):
    """
    Train the Iris classifier, validating every epoch and testing at the end.

    Relies on the module-level `device`, `train_loader`, `val_loader`, and
    `test_loader` defined above. Saves the latest weights to results/nn.pth
    after each epoch (file is overwritten).

    :param lr: learning rate for the Adam optimizer
    :param epochs: number of training epochs
    """
    model = NN(4, 16, 8, 3).to(device)
    loss_function = nn.CrossEntropyLoss()
    # Hand only the trainable parameters to the optimizer.
    pg = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.Adam(pg, lr=lr)
    # Directory for saved weights; create it if missing.
    save_path = os.path.join(os.getcwd(), 'results')
    os.makedirs(save_path, exist_ok=True)

    # Training loop
    for epoch in range(epochs):
        model.train()
        acc_num = torch.zeros(1).to(device)  # per-epoch correct-prediction counter
        sample_num = 0

        train_bar = tqdm(train_loader, file=sys.stdout, ncols=100)
        for data, labels in train_bar:
            sample_num += data.shape[0]
            optimizer.zero_grad()  # clear gradients from the previous step
            outputs = model(data.to(device))
            # torch.max returns (values, indices); [1] is the predicted class index.
            pred_class = torch.max(outputs, dim=1)[1]
            acc_num += torch.eq(pred_class, labels.to(device)).sum()
            loss = loss_function(outputs, labels.to(device))
            loss.backward()
            optimizer.step()

            # Show running loss in the progress bar.
            train_bar.desc = f"Epoch[{epoch + 1}/{epochs}] loss:{loss.item():.3f}"

        val_acc = inference(model, val_loader, device)
        print(f"Epoch[{epoch + 1}/{epochs}] val_acc:{val_acc:.3f}, train_acc:{acc_num.item() / sample_num:.3f}")

        # Save the latest weights every epoch (overwrites the previous file).
        torch.save(model.state_dict(), os.path.join(save_path, 'nn.pth'))

    print('🚀Finished!')

    # Final evaluation on the held-out test set.
    test_acc = inference(model, test_loader, device)
    print(f'test_acc: {test_acc:.3f}')
    
# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()