
import os
import torch
import warnings
import numpy as np
import torch.nn as nn
from visdom import Visdom
import torch.optim as optim
from tqdm import tqdm
from torchvision import transforms
from torch.utils.data import DataLoader
from sklearn.metrics import roc_auc_score
from tools.conduct import val


from tools.conduct import train
from tools.dataload import CovidCTDataset,CovidXCRDataset,CovidDataset
from models import ModelDict
#  Preprocessing: per-channel normalization and image augmentation

# ImageNet channel statistics, applied after ToTensor().
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Training pipeline: resize then random crop + horizontal flip as augmentation.
_train_steps = [
    transforms.Resize(256),
    transforms.RandomResizedCrop(224, scale=(0.5, 1.0)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize,
]
train_transformer = transforms.Compose(_train_steps)

# Validation pipeline: deterministic resize + center crop, no augmentation.
_val_steps = [
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
]
val_transformer = transforms.Compose(_val_steps)



# # Instantiate CovidCTDataset (CT-scan split)
# trainset = CovidCTDataset(root_dir='../COVID-CT/data',
#                           txt_COVID='../COVID-CT/data/trainCT_COVID.txt',
#                           txt_NonCOVID='../COVID-CT/data/trainCT_NonCOVID.txt',
#                           transform=train_transformer)
# valset = CovidCTDataset(root_dir='../COVID-CT/data',
#                         txt_COVID='../COVID-CT/data/valCT_COVID.txt',
#                         txt_NonCOVID='../COVID-CT/data/valCT_NonCOVID.txt',
#                         transform=val_transformer)

# # Instantiate the XCR (chest X-ray) dataset
# trainset = CovidXCRDataset(root_dir='../COVID-CT/data',
#                            txt_COVID='../COVID-CT/data/trainXCR_COVID.txt',
#                            txt_NonCOVID='../COVID-CT/data/trainXCR_NonCOVID.txt',
#                            transform=train_transformer)
# valset = CovidXCRDataset(root_dir='../COVID-CT/data',
#                          txt_COVID='../COVID-CT/data/valXCR_COVID.txt',
#                          txt_NonCOVID='../COVID-CT/data/valXCR_NonCOVID.txt',
#                          transform=val_transformer)




# Instantiate the combined ("all") dataset.
# NOTE(review): despite the "all" label, the paths below point at the XCR
# split files — confirm this is the intended data source.
_DATA_ROOT = '../COVID-CT/data'

trainset = CovidDataset(
    root_dir=_DATA_ROOT,
    txt_COVID=_DATA_ROOT + '/trainXCR_COVID.txt',
    txt_NonCOVID=_DATA_ROOT + '/trainXCR_NonCOVID.txt',
    transform=train_transformer,
)
valset = CovidDataset(
    root_dir=_DATA_ROOT,
    txt_COVID=_DATA_ROOT + '/valXCR_COVID.txt',
    txt_NonCOVID=_DATA_ROOT + '/valXCR_NonCOVID.txt',
    transform=val_transformer,
)


# if __name__ == '__main__':
def train_fun(model, batchsize=16, total_epoch=200, votenum=10, modelname='XCR_resnet50'):
    """Train ``model`` on the module-level ``trainset``/``valset``.

    Every ``votenum`` epochs the model is evaluated on the validation set;
    precision / recall / F1 / accuracy / AUC are printed, appended to
    ``record/<modelname>.txt``, and the weights with the best AUC seen so far
    are saved to ``./model_save/<modelname>/best_auc.pt``.

    Args:
        model: torch module to optimize (parameters are updated in place).
        batchsize: mini-batch size for both DataLoaders.
        total_epoch: total number of training epochs.
        votenum: validate (and possibly checkpoint) every this many epochs.
        modelname: tag used for the save directory and the record file.
    """
    print('train start *************************************************  model --------------------{}'.format(modelname))
    os.makedirs('./model_save/' + modelname, exist_ok=True)
    # fix: open('record/...') below raises FileNotFoundError if the dir is missing
    os.makedirs('record', exist_ok=True)

    # Build the DataLoaders (shuffle only the training data).
    train_loader = DataLoader(trainset, batch_size=batchsize, drop_last=False, shuffle=True)
    val_loader = DataLoader(valset, batch_size=batchsize, drop_last=False, shuffle=False)

    criteria = nn.CrossEntropyLoss()  # cross-entropy loss for the 2-class problem

    # Adam optimizer with a cosine-annealed learning rate, starting at 1e-4.
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_epoch)

    best_auc = 0
    for epoch in tqdm(range(1, total_epoch + 1)):

        train_loss = train(optimizer, epoch, model, train_loader, modelname, criteria)  # one training epoch
        scheduler.step()  # fix: the explicit epoch argument to step() is deprecated

        if epoch % votenum == 0:  # validate every `votenum` epochs
            targetlist, scorelist, predlist, val_loss = val(model, val_loader, criteria)

            # Confusion-matrix counts (positive class == 1).
            TP = ((predlist == 1) & (targetlist == 1)).sum()
            TN = ((predlist == 0) & (targetlist == 0)).sum()
            FN = ((predlist == 0) & (targetlist == 1)).sum()
            FP = ((predlist == 1) & (targetlist == 0)).sum()

            # fix: guard against empty denominators (previously produced NaN
            # and numpy divide warnings when a confusion cell was zero).
            p = TP / (TP + FP) if (TP + FP) > 0 else 0.0
            r = TP / (TP + FN) if (TP + FN) > 0 else 0.0
            F1 = 2 * r * p / (r + p) if (r + p) > 0 else 0.0
            acc = (TP + TN) / (TP + TN + FP + FN)
            AUC = roc_auc_score(targetlist, scorelist)

            # Move losses to host memory so they can be formatted below.
            train_loss = train_loss.cpu().detach().numpy()
            val_loss = val_loss.cpu().detach().numpy()

            print(
                '\n The epoch is {}, average recall: {:.4f}, average precision: {:.4f},average F1: {:.4f}, '
                'average accuracy: {:.4f}, average AUC: {:.4f}'.format(
                    epoch, r, p, F1, acc, AUC))

            # Append the metrics to the per-model record file.
            # fix: context manager guarantees the handle is closed on error.
            with open('record/{}.txt'.format(modelname), 'a+') as f:
                f.write(
                    '\n The epoch is {}, average recall: {:.4f}, average precision: {:.4f},average F1: {:.4f}, '
                    'average accuracy: {:.4f}, average AUC: {:.4f}，train loss:{:.4f},val loss:{:.4f}'.format(
                        epoch, r, p, F1, acc, AUC, train_loss, val_loss))

            # Checkpoint whenever the validation AUC improves.
            if AUC > best_auc:
                torch.save(model.state_dict(), "{}/best_auc.pt".format('./model_save/' + modelname))
                best_auc = AUC


if __name__ == '__main__':
    # Train every registered model in sequence, prefixing each run's name
    # with the dataset tag.
    print('start train---')
    prefix = 'ALL_'
    for model_key, cfg in ModelDict.items():
        train_fun(model=cfg['model'],
                  batchsize=cfg['bs'],
                  total_epoch=200,
                  votenum=10,
                  modelname=prefix + model_key)



