import torchvision
import torch
import os
import datetime
from tqdm import tqdm
import torch.backends.cudnn as cudnn
from torchvision import transforms
from model.PR_Grim_EffiNet import PR_EffiNet
from model.efficientNetv2 import effnetv2_s
from model.efficientNetv1 import EfficientNet
from model.vision_transformer import ViT
from model.ghostNet import ghostnet
import config as cfg

#------------------ Configuration ---------------------------------
train_data_path = cfg.train_data_path          # training dataset root (one sub-directory per class) — must be set
checkpoint_path = cfg.checkpoint_path  # directory where checkpoint .pth files are written — must be set
val_data_path = cfg.val_data_path    # validation dataset root — must be set
train_acc_txt_path = cfg.train_inform  # txt file where per-epoch train/val metrics are appended — must be set
batch_size = 8
val_batch_size = 8
input_shape = cfg.image_shape         # input image size; indexed as [1]=H, [2]=W below (presumably (C,H,W) — TODO confirm)
init_lr = 0.001
start_epoch = 0
epoch = 100
use_shuffle = True              # shuffle the training set
use_checkpoint = False           # resume training from the latest checkpoint
cuda = cfg.cuda                     # use CUDA if True
num_worker = 2                  # number of DataLoader worker processes
drop_last = True                # drop the final incomplete batch
lr_method = 'CyclicLR'            # LR schedule: ['CosineAnnealingLR','StepLR','ReduceLROnPlateau','']
select_model = cfg.selected_model             # model choice: ['B0','B1','B2','B3','B4','B5','effiv2','effiv1','ghostnet','vit']
use_pretrain = False
#----------------------------- helper functions --------------------------------------
def get_length_classes(path:str)->int:
    """Return the number of entries (class sub-directories) directly under *path*."""
    entries = os.listdir(path)
    num_classes = len(entries)
    print("类别数量 ：%d" % num_classes)
    return num_classes

def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group (None if empty)."""
    groups = optimizer.param_groups
    if groups:
        return groups[0]['lr']

def get_dataset_length(path:str)->int:
    """Sum the number of files in every class directory directly under *path*."""
    total = sum(
        len(os.listdir(os.path.join(path, class_dir)))
        for class_dir in os.listdir(path)
    )
    print("文件：%s \n 数量为:%d" % (path, total))
    return total

def mkdir(path:str):
    """Create *path* (including any missing parents) unless it already exists as a directory.

    Safe to call repeatedly; the original dead ``else: return`` branch was removed.
    """
    if not os.path.isdir(path):
        os.makedirs(path)

def fit_one_epoch(model:torch.nn.Module,current_epoch:int,optimizer:torch.optim.Optimizer,scheduler,
                  loss_function,train_data_loader,val_data_loader,count_train,count_val):
    '''
    Run one training pass and one validation pass, then step the scheduler,
    save a checkpoint, and append the epoch's metrics to the metrics txt file.
    Args:
        model: the model to train and evaluate
        current_epoch: index of the current epoch
        optimizer: optimizer
        scheduler: LR scheduler, stepped once per epoch (must not be None)
        loss_function: criterion, e.g. CrossEntropyLoss
        train_data_loader / val_data_loader: DataLoaders for each split
        count_train: number of training samples (after drop_last trimming)
        count_val: number of validation samples (after drop_last trimming)
    '''
    train_total_loss = 0.0
    train_correct_sum = 0
    # defined up-front so an empty loader cannot leave them unbound below
    average_loss = 0.0
    acc = 0.0
    #------------------------------ training phase -------------------------------------------------
    model.train()
    with tqdm(desc="Epoch "+str(current_epoch), postfix=dict, total=count_train//batch_size, ncols=150) as pbar:
        for step,(inputs,labels) in enumerate(train_data_loader):
            # torch.autograd.Variable is deprecated: tensors carry autograd state directly
            if cuda:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()                       # reset gradients
            outputs = model(inputs)                     # forward pass
            loss = loss_function(outputs,labels)

            loss.backward()                             # back-propagate the error
            optimizer.step()                            # single optimisation step
            train_total_loss += loss.item()             # accumulate this step's loss

            _, predicts = torch.max(outputs, 1)
            # .item() keeps the running count as a plain int instead of a 0-d tensor
            train_correct_sum += torch.eq(predicts, labels).sum().item()
            acc = float(train_correct_sum/count_train)   # running accuracy over the full epoch size
            average_loss = train_total_loss/(step+1)    # mean loss over steps so far

            pbar.set_postfix(**{'lr':get_lr(optimizer),'train loss':average_loss,'acc':acc})
            pbar.update(1)
        train_line = ("Epoch:%-3d,lr:%-15.15f,tra|loss:%-5.5f,acc:%-5.5f%%|"%(current_epoch,get_lr(optimizer),average_loss,acc*100))

    #------------------------------ validation phase -----------------------------------------------
    model.eval()
    val_total_loss = 0.0
    val_correct_sum = 0
    # defined up-front so an empty loader cannot leave them unbound below
    val_loss = 0.0
    val_acc = 0.0
    for step, (inputs, labels) in enumerate(val_data_loader, 0):
        with torch.no_grad():  # no graph building / gradient tracking during evaluation
            if cuda:
                inputs_val = inputs.cuda()
                labels_val = labels.cuda()
            else:
                inputs_val = inputs
                labels_val = labels

            # NOTE: the original called optimizer.zero_grad() here; gradients are
            # never produced under no_grad, so the call was removed.
            outputs = model(inputs_val)
            loss = loss_function(outputs, labels_val)
            val_total_loss += loss.item()

            _, val_pre = torch.max(outputs, 1)
            val_correct_sum += torch.eq(val_pre, labels_val).sum().item()

            val_loss = val_total_loss / (step + 1)
            val_acc = float(val_correct_sum/count_val)          # fraction of the validation set classified correctly
    print(f'val loss{val_loss},acc_{val_acc}')

    scheduler.step()  # decay the learning rate once per epoch
    save_pth(current_epoch,model, optimizer, scheduler,
             ckpt_name=f"Epoch{current_epoch}LR_{get_lr(optimizer)}TrainLoss_{average_loss}ValLoss_{val_loss}_ValAcc{val_acc*100}")
    #----------- persist the per-epoch metrics ---------------
    # fixed format string: the original "%-5.5ff%%" wrote a stray literal 'f'
    val_line = (",val|loss:%-5.5f,acc:%-5.5f%%|\n"%(val_loss,val_acc*100))
    with open(train_acc_txt_path,'a') as f:
        if current_epoch == 0:
            f.write(f"---model:{select_model},image_size:{input_shape},lr:{lr_method},epoch:{epoch},"
                    f"classes{get_length_classes(train_data_path)},time:{datetime.datetime.now().strftime('%m-%d')}---\n")
        f.write(train_line+val_line)

def save_pth(cur_epoch,model,optimizer,scheduler,ckpt_name):
    '''
    Save a resumable checkpoint (model / optimizer / scheduler state plus epoch index).

    The dict is written twice: once under *ckpt_name* and once as "LatestCkpt.pth",
    which is overwritten every epoch so the resume code can always find the newest
    checkpoint. Paths are built with os.path.join instead of manual "/" concatenation.
    '''
    print('---------------------保存模型------------------------------------')
    checkpoint_dict = {'net': model.state_dict(),
                       'epoch':cur_epoch,
                       'optimizer': optimizer.state_dict(),
                       'scheduler':scheduler.state_dict()}
    torch.save(checkpoint_dict, os.path.join(checkpoint_path, f"{ckpt_name}.pth"))
    torch.save(checkpoint_dict, os.path.join(checkpoint_path, "LatestCkpt.pth"))

if __name__ == "__main__":
    mkdir(checkpoint_path)
    # ----------------------------- data preparation / preprocessing -------------------------------------------------
    # Compose chains several transform steps into a single callable
    transform = torchvision.transforms.Compose([
        # transforms.CenterCrop(10),
        transforms.Resize((input_shape[1],input_shape[2])),  # resize to the target resolution (may downscale)
        # transforms.RandomHorizontalFlip(),  # horizontal flip with some probability (default p=0.5)
        # transforms.Grayscale(input_shape[0]),  # convert to grayscale with the given number of output channels
        # transforms.RandomRotation(2),  # random rotation within +/- the given angle
        #transforms.ColorJitter(brightness=,contrast=,saturation=,hue=)# brightness(>0), contrast(>0), saturation(>0), hue(-0.5,0.5)
        #transforms.LinearTransformation(),# linear transform, usable for whitening
        # transforms.RandomCrop(),    # crop a randomly chosen region
        # transforms.RandomResizedCrop(),# random-size, random-aspect-ratio crop, then resize to the target size
        # transforms.FiveCrop(),      # corner + centre crops: 5 images, returns a 4D tensor
        # transforms.TenCrop(),       # corner + centre crops plus flips: 10 images
        # transforms.RandomVerticalFlip(),# vertical flip with some probability (default p=0.5)
        # transforms.Normalize(),     # per-channel standardisation: subtract mean, divide by std
        # transforms.Pad(),# padding
        # transforms.RandomAffine(),  # affine transform (projection-like)
        # transforms.RandomGrayscale(),# convert to grayscale with probability p
        # transforms.RandomChoice(),# pick one transform at random from a given list
        # transforms.RandomApply(),# apply a transform with a given probability
        # transforms.RandomOrder(),# apply the given transforms in random order
        transforms.ToTensor()  # convert PIL Image / numpy.ndarray to torch.Tensor
    ])
    # ---------------------------------- dataset loading (ImageFolder: one sub-directory per class) ---------------------
    train_data = torchvision.datasets.ImageFolder(root=train_data_path, transform=transform)
    train_data_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=use_shuffle,
                                                    num_workers=num_worker, drop_last=drop_last)  # training loader

    val_data = torchvision.datasets.ImageFolder(root=val_data_path, transform=transform)
    val_data_loader = torch.utils.data.DataLoader(dataset=val_data, batch_size=val_batch_size, num_workers=num_worker,
                                                  drop_last=drop_last,shuffle=use_shuffle)

    # effective sample counts after drop_last discards the final incomplete batch
    # (NOTE(review): each path is scanned twice here — the result could be cached)
    count_train = get_dataset_length(path=train_data_path) - get_dataset_length(path=train_data_path) % batch_size
    count_val = get_dataset_length(path=val_data_path) - get_dataset_length(path=val_data_path) % val_batch_size
    print("训练集数量:%d,验证集数量:%d"%(count_train,count_val))
    # ---------------------------- model selection ----------------------------------------------
    num_class = get_length_classes(train_data_path)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if select_model == 'effiv2':
        model = effnetv2_s(num_classes=num_class)
        print('selected model is :',select_model)
    elif select_model == 'effiv1':
        model = EfficientNet(width_coeff=1,depth_coeff=1,num_classes=num_class)
        print('selected model is :',select_model)
    elif select_model == 'ghostnet':
        model = ghostnet(num_classes=num_class)
        print('selected model is :',select_model)
    elif select_model == 'vit':
        model = ViT(input_shape[1],num_classes=num_class,patch_size=input_shape[1]//16)
        print('selected model is :',select_model)
    else:
        # any other value ('B0'..'B5') selects the corresponding PR_EffiNet variant
        model = PR_EffiNet(select_model=select_model,num_classes=num_class)
        print('selected model is :',select_model)

    # ---------------------------- optimizer and loss: after pretraining, before resume --------------------------
    criterion = torch.nn.CrossEntropyLoss()
    # optimizer = torch.optim.Adam(params=model.parameters(), lr=init_lr)
    # 'initial_lr' is stored in the param group so schedulers can resume with last_epoch
    optimizer = torch.optim.Adam([{'params':model.parameters(),'initial_lr':init_lr}],lr=init_lr)
    #---------- LR schedule selection; original note: must come after checkpoint resume, otherwise the last_epoch parameter cannot be honoured ------------------
    # NOTE(review): if lr_method matches none of the branches below, scheduler stays None
    # and fit_one_epoch's scheduler.step() will raise — verify lr_method values.
    scheduler = None
    if lr_method == 'CosineAnnealingLR':
        # lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=int(epoch / 2),
        #                                                           eta_min=1e-6)     # cosine-annealing LR
        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=epoch // 4, T_mult=2,
                                                                            eta_min=1e-7)
    elif lr_method == 'StepLR':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=epoch // 20, gamma=0.5)  # fixed-interval LR decay
    elif lr_method == 'ReduceLROnPlateau':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='max', factor=0.5,
                                                                  min_lr=1e-6, patience=epoch // 30, threshold=0.001,
                                                                  cooldown=2)  # adaptive LR; original note: code incomplete (step() is called without the metric it needs)
    elif lr_method == "CyclicLR":
        scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, 1e-8, init_lr, 5, 5, "triangular2",
                                                         scale_mode='cycle',
                                                         cycle_momentum=False, base_momentum=0.8, max_momentum=0.9)

    # ------------------------- move to CUDA -----------------------
    if cuda:
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        cudnn.benchmark = True  # let cuDNN benchmark conv algorithms and pick the fastest
        model = model.cuda()  # move the model to the GPU

    # ---------------------- resume from checkpoint ---------------------------------------
    if use_checkpoint:
        # NOTE(review): the checkpoint is loaded after the (possible) DataParallel wrap;
        # state-dict key prefixes ('module.') must match how the checkpoint was saved — confirm.
        checkpoint = torch.load(f=checkpoint_path + f"/LatestCkpt.pth", map_location=device)
        start_epoch = checkpoint["epoch"]+1
        model.load_state_dict(checkpoint['net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])


    if cuda:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True

    for current_epoch in range(start_epoch,epoch):
        # one full pass over the training and validation sets per epoch
        fit_one_epoch(current_epoch=current_epoch,optimizer=optimizer,scheduler=scheduler,loss_function=criterion,model=model,
                      train_data_loader=train_data_loader,val_data_loader=val_data_loader,count_train=count_train,
                      count_val=count_val)




