#!/usr/bin/python3
# encoding: utf-8

import warnings
warnings.filterwarnings("ignore")

import torch
import os
import pandas as pd
from torch.utils.data import DataLoader
import torch.nn as nn
from dataset import MyDataset
from val import val_fun
from torch.optim.lr_scheduler import StepLR,CosineAnnealingLR
# from warmup_scheduler import GradualWarmupScheduler
from models.model import model_cpn50,model_small_seghrnet,model_simple_pose_res18,model_rsn18,model_Unet_efficient_b3,\
    model_simple_pose_res101,model_unet,model_resnet_unet,model_DLinknet34,model_resnet_unet_GN,model_hrnet
from data_aug import tensor_resize
import time
from loss import HeatmapLoss

# Training configuration.
# ## image_1
# Image_Size = (384,256)
## image_2
#{height * width}
Image_Size = (768,576)
Batch_Size = 6
# Gaussian sigma used when rendering the target heatmaps.
Sigma = 19
# Sigma =[7,7,15,25]
lr = 1e-4
num_epoch = 300
# Gradients are accumulated over this many batches before each optimizer
# step, giving an effective batch size of Batch_Size * accumlation_step.
accumlation_step = 8
# Num_Class = 26
Num_Class = 10
loss_fun = nn.MSELoss()# loss_fun = SpLoss()
# loss_fun = HeatmapLoss()

# Output locations: checkpoint directory and the text file where per-epoch
# train/val metrics are appended.
Model_Save_Patch = '../model_save/0304/baseline'
Record_File_Dir = './record/0304_baseline.txt'

# Input data: images, train annotations (the val split is derived by
# replacing 'train' with 'val' in Json_Dir), and precomputed heatmaps.
Data_Path = './data_0304/images'
Json_Dir = 'label_file_0304/point_0304_train.json'
Heatmap_Path = './data/heatmap_640_512_group_01'
print(Record_File_Dir)
# Create the directory part of Record_File_Dir (strips the filename).
os.makedirs(Record_File_Dir.replace(Record_File_Dir.split('/')[-1],''),exist_ok=True)


def train(model):
    """Train *model* on MyDataset with gradient accumulation, periodic
    validation, checkpointing, and metric logging.

    Uses Adam with cosine-annealed learning rate. Checkpoints are written
    to Model_Save_Patch; train/val metrics are appended to Record_File_Dir.

    Args:
        model: torch.nn.Module mapping a batch of images to heatmaps.
    """
    if not os.path.exists(Model_Save_Patch):
        os.makedirs(Model_Save_Patch)
    My_dataSet = MyDataset(data_path=Data_Path,json_dir=Json_Dir,image_size = Image_Size ,
                           data_aug=True,sigma=Sigma,heatmap_path=Heatmap_Path)
    train_data_loader = DataLoader(dataset=My_dataSet, batch_size=Batch_Size, shuffle=True,
                                   num_workers=6, pin_memory=True)

    optimizer = torch.optim.Adam(model.parameters(),lr = lr, weight_decay= 2e-5)
    # optimizer = torch.optim.SGD(model.parameters(),lr = lr,momentum=0.9,weight_decay=2e-5)
    scheduler = CosineAnnealingLR(optimizer,T_max=num_epoch)
    total_step = len(train_data_loader)

    for epoch in range(num_epoch):
        # model.train() must be re-set every epoch because val_fun() below
        # switches the model to eval() mode.
        model.train()
        start_time = time.time()

        print('epoch{}'.format(epoch+1))
        train_loss = 0
        # Start each epoch with clean gradients (the original code relied on
        # the step-0 update below to clear them, which also meant the very
        # first optimizer step was taken after a single batch).
        optimizer.zero_grad()
        for step,(batch_image,batch_label) in enumerate(train_data_loader):
            if torch.cuda.is_available():
                batch_image = batch_image.float().cuda()
                batch_label = batch_label.float().cuda()
            pred = model(batch_image)
            loss = loss_fun(pred,batch_label)
            loss.backward()
            # Gradient accumulation: update weights every `accumlation_step`
            # batches, and also on the last batch so trailing gradients are
            # not silently dropped at epoch end.
            if ((step + 1) % accumlation_step) == 0 or (step + 1) == total_step:
                optimizer.step()
                optimizer.zero_grad()
            if step % 20 ==0 and epoch <3:
                # .item() detaches the scalar from the graph for printing.
                print('Step [{}/{}],Loss:{:.7f}'.format(step + 1, total_step, loss.item()))
            train_loss += loss.item()

        # Advance the LR schedule once per epoch, AFTER the optimizer
        # updates (calling scheduler.step() before optimizer.step() is the
        # deprecated pre-1.1 PyTorch order and skips the initial lr).
        scheduler.step()

        train_loss = train_loss/total_step
        print('Epoch [{}/{}],Loss:{:.7f}'.format(epoch+1,num_epoch,train_loss))

        # Checkpoint + evaluate every 5 epochs early on (epoch < 15) and
        # every 10 epochs thereafter. (Fixed the original
        # `epoch > 300or epoch < 15` typo.)
        if ((epoch % 5 == 0) and (epoch > 300 or epoch < 15)) or (epoch % 10 == 0):
            torch.cuda.empty_cache()
            torch.save(model.state_dict(),(Model_Save_Patch+ '/'+str(epoch+1)+'.pth'))
            # Evaluate on the val split (json path derived from the train
            # json) and on the train split itself.
            val_recall, val_l2_mean, val_l2_medium = val_fun(model,
                                Data_Path,Json_Dir.replace('train','val'), Image_Size, distance=2,spacing=0.545)
            train_recall, train_l2_mean, train_l2_medium = val_fun(model,
                                        Data_Path,Json_Dir,Image_Size, distance=2,spacing=0.545)
            print('val result--epoch',str(epoch+1),'--val_recall',val_recall,'--val_l2_mean--',val_l2_mean,
                  '--val_l2_medium--',val_l2_medium)
            print('train result--epoch', str(epoch + 1), '--train_recall', train_recall, '--train_l2_mean--', train_l2_mean,
                  '--train_l2_medium--', train_l2_medium)

            print('---------------------------------------------')
            # `with` guarantees the record file is closed even if a write fails.
            with open(Record_File_Dir,'a') as record_file:
                record_file.write(
                    'epoch--'+str(epoch+1)+'--train_loss--'+ str(train_loss)+'--val_recall--'+str(val_recall)+
                    '--val_l2_mean--'+str(val_l2_mean)+'--val_l2_medium--'+str(val_l2_medium))
                record_file.write('\n')
                record_file.write('--train_recall--' + str(train_recall) +'--train_l2_mean--' + str(train_l2_mean)
                                  + '--train_l2_medium--' + str(train_l2_medium))
                record_file.write('\n')
            torch.cuda.empty_cache()
        if epoch < 8:
            print('use time -------',time.time()-start_time)

    # Save the final weights once, after training finishes (the original
    # rewrote this same `num_epoch+1` file from inside the epoch loop on
    # every iteration).
    torch.save(model.state_dict(),Model_Save_Patch+'/'+str(num_epoch+1)+'.pth')



def load_pretrained_state_dict(model,pre_dir):
    """Partially load pretrained weights from *pre_dir* into *model*.

    Only parameters whose keys both exist in the model and do NOT contain
    'out' are copied — the output head is skipped so it can be re-trained
    (e.g. with a different number of classes). All other parameters keep
    the model's current values.

    Args:
        model: torch.nn.Module to update in place.
        pre_dir: path to a saved state_dict (.pth file).

    Returns:
        The same model instance, with matching weights overwritten.
    """
    # map_location='cpu' lets the checkpoint load regardless of which
    # device it was saved on; load_state_dict copies the values onto the
    # model's own devices afterwards.
    pre_dict = torch.load(pre_dir, map_location='cpu')
    model_dict = model.state_dict()
    new_model_dict = {k: v for k, v in pre_dict.items()
                      if k in model_dict and 'out' not in k}
    model_dict.update(new_model_dict)
    model.load_state_dict(model_dict)
    return model




if __name__ == '__main__':
    # Restrict training to the listed GPUs; DataParallel below uses all of them.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,2,3'
    # Let cuDNN auto-tune convolution algorithms for the fixed input size.
    torch.backends.cudnn.benchmark = True
    # train(model_Unet_efficient_b3(num_class=Num_Class))
    model = model_resnet_unet_GN(layer='resnet34',use_aspp = True,num_classes=Num_Class,pre_train=False)
    
    # load_pretrained_state_dict(model,'../model_save/Group_1224/11.pth')
    model = nn.DataParallel(model).cuda()
    # Resume from a previous checkpoint (the state dict was saved from a
    # DataParallel-wrapped model, so it is loaded after wrapping).
    model.load_state_dict(torch.load('../model_save/0304/baseline/101.pth'))
    
    train(model)
    # train(model_hrnet(pretrained=True,num_class=Num_Class,upconv=False))
    # train(model_hrnet(pretrained=True,num_class=Num_Class,upconv=True))

    # train(model_small_seghrnet(Num_Class))
    # train(model_rsn18(num_class=Num_Class))
    # train(model_simple_pose_res101(pretrain=True,num_class=Num_Class))