from process_data import get_data_list,get_binary_list_stage1,get_binary_list_stage2,adjust_data_list,remake_data_list
from model import *
from val import val_fun
from dataset import Mydataset,mixup_data,mixup_criterion
from loss import FocalLoss
from torch.optim.lr_scheduler import StepLR,CosineAnnealingLR
from warmup_scheduler import GradualWarmupScheduler
from torch.utils.data import DataLoader,Dataset
import torch.nn as nn
import torchvision
import torch
import os
import time
torch.cuda.set_device(0)

# -----------------------------------------------------------------------------
# Experiment configuration — module-level switches read by train().
# -----------------------------------------------------------------------------
Remake = 'stage1'            # Class remapping: 'stage1' merges classes 1 and 2;
                             # 'stage2' keeps only classes 1/2, relabelled 0/1;
                             # None keeps the original labels.
Adjust = False               # Re-balance the data to the online 4:1:2:3 split.
Freeze = False               # Freeze the backbone; train only the fc head.
Diff_Lr = False              # Layers other than fc train at 0.1x the base lr.
Concat = 0                   # False: no concat; 0: concat along y; 1: along x;
                             # 2: 2x2 tiling; 'sample': random choice.
Shuffle_Concat = True        # Swap in a random same-class sample as a perturbation.
Weight_Blance = True         # Apply per-class weights in the loss.
Focal_Loss = False           # Use FocalLoss instead (only when Weight_Blance is off).
Over_Sample = False          # Over-sample under-represented classes.
Mix_Up = False               # mixup data augmentation.
Key_Frame = False            # Keep key frames only; forced True whenever Concat is used.
Normalize = True
Binary = False               # Split the task into two binary-classification stages.
Model_Save_Patch = '../model_save/final/1006/resnext50_shuffle_concat_0_adjust_stage1_300'
Record_File_Dir = './record/final/1006/resnext50_shuffle_concat_0_adjust_stage1_300.txt'
# Image_Size = (960,512)
# Image_Size = (800,488)
# Image_Size = (800,450)
Image_Size = (300,170)
# Image_Size = (512,288)
Batch_Size = 4
lr = 1e-5
Num_Epoch = 50
accumlation_step = 4         # Gradient-accumulation steps per optimizer update.
Num_Class = 4

os.makedirs(Model_Save_Patch, exist_ok=True)
# Create the record file's parent directory. os.path.dirname replaces the
# original str.replace() trick, which would corrupt the path if the file
# name also occurred as a substring earlier in it.
os.makedirs(os.path.dirname(Record_File_Dir), exist_ok=True)

# Loss selection.
# NOTE(review): the weight vector below assumes 3 classes (the stage1 setup);
# it would mismatch the default Num_Class = 4 if Remake were None — confirm.
if Weight_Blance:
    # loss_fun = nn.CrossEntropyLoss(weight=torch.Tensor([1,4,2,1]).cuda())
    loss_fun = nn.CrossEntropyLoss(weight=torch.Tensor([1.5,3,1]).cuda())
elif Focal_Loss:
    loss_fun = FocalLoss().cuda()
else:
    loss_fun = nn.CrossEntropyLoss().cuda()

# `is not False` deliberately distinguishes Concat == 0 (y-axis concat)
# from Concat == False (concat disabled); `0 == False` would conflate them.
if Concat is not False:
    Key_Frame = True
if Remake == 'stage1':
    Num_Class = 3
if Remake == 'stage2':
    Num_Class = 2


def train(model):
    """Train ``model`` and checkpoint the latest and best weights.

    All hyper-parameters (batch size, lr, augmentation switches, class
    remapping, ...) are read from the module-level configuration constants.
    Each epoch the model is evaluated with ``val_fun`` on the validation
    split (and on the un-augmented training split, for reference); results
    are appended to ``Record_File_Dir``.  The most recent weights are saved
    to ``Model_Save_Patch``/new_.pth and the best-scoring ones to best_.pth.

    :param model: a CUDA-resident classifier exposing ``.fc`` (needed when
                  ``Diff_Lr`` is enabled).
    """
    data_list_train, data_list_val = get_data_list(only_key_frame=Key_Frame, over_sample=Over_Sample)
    if Adjust:
        # Re-balance both splits to the online 4:1:2:3 class distribution.
        # NOTE(review): adjusting the *train* list was observed to hurt
        # validation accuracy, hence it stays behind the Adjust switch.
        data_list_val = adjust_data_list(data_list_val)
        data_list_train = adjust_data_list(data_list_train)
    if Remake is not None:
        # Collapse/select classes according to the stage1/stage2 scheme.
        data_list_train = remake_data_list(data_list_train, Remake)
        data_list_val = remake_data_list(data_list_val, Remake)

    best_result = 0
    os.makedirs(Model_Save_Patch, exist_ok=True)

    train_dataSet = Mydataset(data_list_train, train=True, image_size=Image_Size, concat=Concat,
                              normalize=Normalize, shuffle_concat=Shuffle_Concat)
    train_data_loader = DataLoader(dataset=train_dataSet, batch_size=Batch_Size, shuffle=True,
                                   num_workers=6, pin_memory=False)
    val_dataSet = Mydataset(data_list_val, train=False, image_size=Image_Size, concat=Concat, normalize=Normalize)
    val_data_loader = DataLoader(dataset=val_dataSet, batch_size=4, shuffle=True,
                                 num_workers=6, pin_memory=True)

    # Second loader over the *training* list with train=False so that training
    # accuracy is measured without data augmentation.
    val_dataSet_2 = Mydataset(data_list_train, train=False, image_size=Image_Size, concat=Concat, normalize=Normalize)
    val_data_loader_2 = DataLoader(dataset=val_dataSet_2, batch_size=4, shuffle=True,
                                   num_workers=6, pin_memory=True)

    if Diff_Lr:
        # Every layer except the final fc trains at 0.1x the base lr.
        fc_params = list(map(id, model.fc.parameters()))
        base_params = filter(lambda p: id(p) not in fc_params,
                             model.parameters())
        optimizer = torch.optim.Adam([{'params': base_params, 'lr': lr * 0.1},
                                      {'params': model.fc.parameters()}], lr=lr, weight_decay=2e-5)
    else:
        optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=2e-5)
    scheduler = CosineAnnealingLR(optimizer, T_max=Num_Epoch)
    total_step = len(train_data_loader)

    # Start from clean gradients so nothing stale leaks into the first
    # accumulated update.
    optimizer.zero_grad()
    for epoch in range(Num_Epoch):
        # val_fun switches to model.eval(); switch back every epoch.
        model.train()
        start_time = time.time()

        print('epoch{}'.format(epoch + 1))
        train_loss = 0
        for step, (batch_image, batch_label) in enumerate(train_data_loader):
            if torch.cuda.is_available():
                batch_image = batch_image.float().cuda()
                # (was `.float().long()` — the float round-trip was redundant)
                batch_label = batch_label.long().cuda()
            if Mix_Up:
                # mixup augmentation: interpolate inputs, mix the two losses.
                batch_image, batch_label_a, batch_label_b, lam = mixup_data(batch_image, batch_label)
                pred = model(batch_image)
                loss = mixup_criterion(loss_fun, pred, batch_label_a, batch_label_b, lam)
            else:
                pred = model(batch_image)
                loss = loss_fun(pred, batch_label)
            loss.backward()
            # Gradient accumulation: update once every `accumlation_step`
            # batches and flush whatever remains at the end of the epoch.
            # Bug fix: the original `step % accumlation_step == 0` stepped
            # after the very first batch and could carry unapplied gradients
            # across epochs.
            # NOTE(review): loss is not divided by accumlation_step, so each
            # update uses the *summed* gradient — confirm this matches the
            # intended effective learning rate.
            if (step + 1) % accumlation_step == 0 or (step + 1) == total_step:
                optimizer.step()
                optimizer.zero_grad()
            if step % 20 == 0 and epoch < 3:
                print('Step [{}/{}],Loss:{:.7f}'.format(step + 1, total_step, loss))
            train_loss += loss.item()

        scheduler.step()
        train_loss = train_loss / total_step
        print('Epoch [{}/{}],Loss:{:.7f}'.format(epoch + 1, Num_Epoch, train_loss))

        torch.cuda.empty_cache()
        val_Acc = val_fun(model, val_data_loader, Remake)
        print('--train_loss--', train_loss, '--val_Acc--', val_Acc)
        with open(Record_File_Dir, 'a') as record_file:
            record_file.write(
                'epoch--' + str(epoch + 1) + '--train_loss--' + str(train_loss) + '--val_ACC--' + str(val_Acc))
            record_file.write('\n')
            if epoch % 1 == 0:
                # Accuracy on the un-augmented training split, for reference.
                train_Acc = val_fun(model, val_data_loader_2, Remake)
                print('-------------------------------train_Acc--', train_Acc)
                record_file.write(
                    'epoch--' + str(epoch + 1) + '--train_ACC--' + str(train_Acc))
                record_file.write('\n')

        torch.cuda.empty_cache()

        # Always keep the latest weights; additionally keep the best so far.
        torch.save(model.state_dict(), (Model_Save_Patch + '/' + 'new_' + '.pth'))
        if val_Acc > best_result:
            print('**************get better model with acc of *******************', val_Acc)
            torch.save(model.state_dict(), (Model_Save_Patch + '/' + 'best_' + '.pth'))
            best_result = val_Acc
        print('---------------------------------------------')

        if epoch < 3:
            print('use time -------', time.time() - start_time)
        torch.cuda.empty_cache()


if __name__ == '__main__':
    # cuDNN is disabled for this run (deterministic fallback kernels).
    torch.backends.cudnn.enabled = False
    """
    train the model
    """
    # Experiment history (kept for reference): alternative backbones tried
    # were model_Xception, model_InceptionV4 and model_res18; warm-starting
    # from an earlier resnext50 checkpoint (all weights except the fc head)
    # was also tested, as was a follow-up InceptionV4 run with Over_Sample
    # enabled and Weight_Blance disabled.
    net = model_resnext50_32x4d(Num_Class=Num_Class, pretrained=True, freeze_weight=Freeze).cuda()
    train(net)
