# Author: Li Cong, 2022-03-06
# Training script
import os

import torch
import torchvision
from torch.utils.data import DataLoader, random_split

from datasets import *



if __name__ == '__main__':
    # Fall back to CPU when CUDA is unavailable; the original
    # torch.device("cuda") crashed on CPU-only machines.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = "xin_model_21_0.91.pth"
    root_dir = "../autodl-tmp/AID"
    # One subdirectory per class label — TODO confirm against AID_Dataset.
    label_dir_list = os.listdir(root_dir)
    AID = AID_Dataset(root_dir, label_dir_list)

    # Split into train/test (70/30). Lengths are derived from the actual
    # dataset size instead of the hard-coded [7000, 3000], so the script
    # no longer raises when the dataset is not exactly 10000 samples.
    n_train = int(0.7 * len(AID))
    n_test = len(AID) - n_train
    train_dataset, test_dataset = random_split(
        dataset=AID,
        lengths=[n_train, n_test],
        generator=torch.Generator().manual_seed(0),  # reproducible split
    )
    train_dataloader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
    # No need to shuffle during evaluation; accuracy is order-independent.
    test_dataloader = DataLoader(dataset=test_dataset, batch_size=64, shuffle=False)

    # NOTE(review): torch.load unpickles arbitrary code — only load
    # checkpoints from a trusted source. map_location lets a GPU-saved
    # checkpoint load on a CPU-only machine.
    resnet = torch.load(model_name, map_location=device)
    resnet.to(device)

    epoch = 1            # number of training epochs
    total_training = 0   # global step counter, used for periodic loss logging
    loss_fn = torch.nn.CrossEntropyLoss()
    loss_fn.to(device)
    optimer = torch.optim.Adam(resnet.parameters(), lr=0.001)
    # Decay the learning rate by 5% after each epoch.
    explr = torch.optim.lr_scheduler.ExponentialLR(optimer, gamma=0.95)

    for i in range(epoch):
        # ---- training pass ----
        resnet.train()
        print("********第{}轮训练开始**********".format(i))
        for data in train_dataloader:
            optimer.zero_grad()
            imgs, label = data
            imgs = imgs.to(device)
            label = label.to(device)
            outputs = resnet(imgs)
            loss = loss_fn(outputs, label)
            loss.backward()
            optimer.step()
            total_training += 1
            if total_training % 100 == 0:
                # .item() extracts the scalar instead of printing a live
                # graph tensor.
                print("第{}次训练loss:{}".format(total_training, loss.item()))
        explr.step()

        # ---- evaluation on the held-out test split ----
        total_acc = 0
        resnet.eval()
        with torch.no_grad():
            for data in test_dataloader:
                imgs, label = data
                imgs = imgs.to(device)
                label = label.to(device)
                outputs = resnet(imgs)
                print("######################")
                print("预测结果")
                print(outputs.argmax(1))
                print("标签值")
                print(label)
                # .item() converts the 0-dim tensor count to a plain int,
                # keeping total_acc a Python number.
                total_acc += (outputs.argmax(1) == label).sum().item()
        acc = total_acc / len(test_dataset)
        print("总的准确率acc={}".format(acc))
        # Checkpoint only models beating the 0.8 accuracy threshold.
        if acc > 0.8:
            torch.save(resnet, "xin_model_{}_{:.2f}.pth".format(i, acc))
            print("**模型预测已保存")
