import os
import sys

from mpmath.libmp.libintmath import ifac2
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from data_loader import iris_dataloader


# Simple fully-connected classifier: in_dim features -> out_dim classes.
class NN(nn.Module):
    """Three-layer MLP with ReLU activations between the linear layers.

    Args:
        in_dim: number of input features (4 for Iris).
        hidden_dim1: width of the first hidden layer.
        hidden_dim2: width of the second hidden layer.
        out_dim: number of output classes (3 for Iris).
    """

    def __init__(self, in_dim, hidden_dim1, hidden_dim2, out_dim):
        super(NN, self).__init__()
        self.layer1 = nn.Linear(in_dim, hidden_dim1)
        self.layer2 = nn.Linear(hidden_dim1, hidden_dim2)
        self.layer3 = nn.Linear(hidden_dim2, out_dim)

    def forward(self, x):
        # Without a nonlinearity, stacked Linear layers collapse into a single
        # linear map, making the hidden layers useless; ReLU fixes that.
        # torch.relu is used functionally so the state_dict keys are unchanged.
        x = torch.relu(self.layer1(x))
        x = torch.relu(self.layer2(x))
        x = self.layer3(x)  # raw logits, as expected by CrossEntropyLoss

        return x

# Select the compute device: Apple MPS if available, otherwise CPU.
device = torch.device("mps:0" if torch.backends.mps.is_available() else "cpu")

# Load the dataset and split it into train / validation / test
# (70% / 20% / remainder, so all samples are accounted for).
custom_dataset = iris_dataloader('./Iris_data.txt')

train_size = int(len(custom_dataset) * 0.7)
val_size = int(len(custom_dataset) * 0.2)
test_size = len(custom_dataset) - train_size - val_size
# random_split shuffles the indices before partitioning into the three subsets.
train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
    custom_dataset, [train_size, val_size, test_size])

# Build the loaders; only the training loader reshuffles between epochs.
train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False)

# Report actual sample counts. NOTE: the previous len(train_dataloader)*16
# over-counted when the last batch was partial; len(dataset) is exact.
print("训练集的大小", len(train_dataset), "验证集的大小", len(val_dataset), "测试集大小", len(test_dataset))

# Inference helper: compute and return classification accuracy.
def infer(model, dataset, device):
    """Evaluate *model* on *dataset* and return the fraction of correct predictions.

    Args:
        model: a module mapping a (batch, features) tensor to (batch, classes) logits.
        dataset: iterable yielding (data, label) batches (e.g. a DataLoader).
        device: torch device (or device string) the batches are moved to.

    Returns:
        Accuracy in [0, 1]; 0.0 if the dataset is empty.
    """
    model.eval()
    acc_num = 0
    sample_num = 0
    # no_grad: inference only, skip autograd bookkeeping.
    with torch.no_grad():
        for data, label in dataset:
            data = data.to(device)
            label = label.to(device)
            output = model(data)
            # torch.max returns (values, indices); [1] is the predicted class.
            pred = torch.max(output, dim=1)[1]
            acc_num += torch.eq(pred, label).sum().item()
            # BUGFIX: divide by the number of SAMPLES, not len(dataset), which
            # is the number of batches and only coincides when batch_size == 1.
            sample_num += label.size(0)
    return acc_num / sample_num if sample_num else 0.0

def main(lr=0.005, epochs=20):
    """Train the Iris classifier, validating each epoch, then report test accuracy.

    Uses the module-level `device`, `train_dataloader`, `val_dataloader` and
    `test_dataloader`. Saves the latest weights to results/weights/model.pth
    after every epoch.

    Args:
        lr: Adam learning rate.
        epochs: number of passes over the training set.
    """
    model = NN(4, 12, 6, 3).to(device)  # hidden sizes are tunable hyperparameters
    loss_f = nn.CrossEntropyLoss()
    # Collect only trainable parameters for the optimizer.
    pg = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.Adam(pg, lr=lr)

    # Checkpoint directory; create it if missing.
    save_path = os.path.join(os.getcwd(), "results/weights")
    os.makedirs(save_path, exist_ok=True)

    for epoch in range(epochs):
        model.train()
        acc_num = torch.zeros(1).to(device)
        sample_num = 0

        train_bar = tqdm(train_dataloader, file=sys.stdout, ncols=100)
        for data, label in train_bar:
            # Move the batch to the device ONCE; labels arrive as (batch, 1)
            # so squeeze to (batch,) for CrossEntropyLoss.
            data = data.to(device)
            label = label.squeeze(1).to(device)
            sample_num += data.shape[0]

            optimizer.zero_grad()
            outputs = model(data)
            # torch.max returns (values, indices); [1] is the predicted class index.
            pred_class = torch.max(outputs, dim=1)[1]
            acc_num += torch.eq(pred_class, label).sum()

            loss = loss_f(outputs, label)
            loss.backward()
            optimizer.step()
            train_bar.desc = "train epoch[{}/{}] loss:{:.3f} acc:{:.3f}".format(epoch + 1, epochs, loss.item(),
                                                                                acc_num.item() / sample_num)
        val_acc = infer(model, val_dataloader, device)
        print("epoch[{}/{}] val_acc:{:.3f}".format(epoch + 1, epochs, val_acc))
        # Overwrites the checkpoint each epoch (latest weights, not best-by-val).
        torch.save(model.state_dict(), os.path.join(save_path, "model.pth"))

    print("训练结束")

    test_acc = infer(model, test_dataloader, device)
    print("测试集的准确率为：", test_acc)
if __name__ == "__main__":
    # Run training only when executed as a script, not on import.
    main()


