#!/usr/bin/python3
# encoding: utf-8

import warnings

warnings.filterwarnings("ignore")

import torch
import os
import numpy as np
from torch.utils.data import DataLoader
import torch.nn as nn
from dataset import MyDataset
from val import val_fun
from torch.optim.lr_scheduler import StepLR, CosineAnnealingLR
from models.model import model_deeplab_v3_xception,model_deeplab_v3_resnet
# from warmup_scheduler import GradualWarmupScheduler
from models.model import model_cpn50, model_small_seghrnet, model_simple_pose_res18, model_rsn18, \
    model_Unet_efficient_b3, \
    model_simple_pose_res101, model_unet, model_resnet_unet, model_DLinknet34, model_resnet_unet_GN, model_hrnet
import time


# ----- Training hyper-parameters -----
Batch_Size = 2
lr = 1e-3
num_epoch = 50
accumlation_step = 8  # gradient-accumulation window (effective batch = Batch_Size * accumlation_step)
val_epoch = 2         # run validation every `val_epoch` epochs
# Unweighted cross-entropy; the commented variant applied per-class weights.
# loss_fun = nn.CrossEntropyLoss(weight=torch.Tensor([1,1,30,10,10,10,10,20,1,30]).cuda())
loss_fun = nn.CrossEntropyLoss()
Model_Save_Patch = '../model_save/Unet_deeplabv3_res_0126'
Record_File_Dir = './record/Unet_deeplabv3_res_0126.txt'


# Fixed: the original path mixed a backslash separator
# ('...210120\suichang...'), which is an invalid escape sequence and only
# resolves on Windows; use a forward slash so it works cross-platform.
Image_Path = '../suichang_round1_train_210120/suichang_round1_train_210120'
Train_Json_Dir = 'label_file/train.json'
Val_Json_Dir = 'label_file/val.json'
# Create the record and checkpoint directories up front.
# os.path.dirname replaces the fragile str.replace() trick for stripping
# the filename from Record_File_Dir.
os.makedirs(os.path.dirname(Record_File_Dir), exist_ok=True)
os.makedirs(Model_Save_Patch, exist_ok=True)

def train(model):
    """Train `model` on the segmentation dataset with gradient accumulation.

    Every `val_epoch` epochs the model is evaluated on the validation split;
    the best-mIoU weights are saved to `Model_Save_Patch/best.pth` and metrics
    are appended to `Record_File_Dir`. Late in training (epoch > 30) IoU on
    the training split is also logged to monitor over-fitting.

    Args:
        model: a segmentation network returning per-pixel class logits;
            assumed to already be on the target device (CUDA if available).
    """
    os.makedirs(Model_Save_Patch, exist_ok=True)

    train_dataSet = MyDataset(image_path=Image_Path, json_dir=Train_Json_Dir, data_aug=False)
    train_data_loader = DataLoader(dataset=train_dataSet, batch_size=Batch_Size, shuffle=True,
                                   num_workers=4, pin_memory=True)
    val_dataSet = MyDataset(image_path=Image_Path, json_dir=Val_Json_Dir, data_aug=False)
    val_data_loader = DataLoader(dataset=val_dataSet, batch_size=Batch_Size * 2, shuffle=True,
                                 num_workers=4, pin_memory=True)
    # Second loader over the *training* split, used only to measure train IoU.
    val_dataSet2 = MyDataset(image_path=Image_Path, json_dir=Train_Json_Dir, data_aug=False)
    val_data_loader2 = DataLoader(dataset=val_dataSet2, batch_size=Batch_Size * 2, shuffle=True,
                                  num_workers=4, pin_memory=True)

    # optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=2e-5)
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=2e-5)
    scheduler = CosineAnnealingLR(optimizer, T_max=num_epoch)
    total_step = len(train_data_loader)

    best_miou = 0
    for epoch in range(num_epoch):
        # model.train() must be re-set every epoch: validation switches the
        # model to eval() mode.
        model.train()
        print('epoch{}'.format(epoch + 1))
        train_loss = 0
        optimizer.zero_grad()  # start the epoch with clean gradients
        for step, (batch_image, batch_label) in enumerate(train_data_loader):
            if torch.cuda.is_available():
                batch_image = batch_image.float().cuda()
                batch_label = batch_label.float().cuda()
            pred = model(batch_image)
            loss = loss_fun(pred, batch_label.long())
            # Scale the loss so the accumulated gradient matches a single
            # large-batch update instead of being accumlation_step x too big.
            (loss / accumlation_step).backward()
            # Fixed off-by-one: the original `step % accumlation_step == 0`
            # stepped the optimizer right after the first backward pass,
            # defeating the accumulation for that window.
            if (step + 1) % accumlation_step == 0:
                optimizer.step()
                optimizer.zero_grad()
            if step % 20 == 0 and epoch < 3:
                print('Step [{}/{}],Loss:{:.7f}'.format(step + 1, total_step, loss.item()))
            train_loss += loss.item()
        # Apply any leftover gradients from a final partial accumulation
        # window; the original silently dropped them.
        if total_step % accumlation_step != 0:
            optimizer.step()
            optimizer.zero_grad()

        train_loss = train_loss / total_step
        print('Epoch [{}/{}],Loss:{:.7f}'.format(epoch + 1, num_epoch, train_loss))

        if epoch % val_epoch == 0:
            torch.cuda.empty_cache()
            val_iou = val_fun(model, val_data_loader)
            val_miou = np.nanmean(val_iou)
            print('mean val iou', val_miou)
            if val_miou > best_miou:
                torch.save(model.state_dict(), Model_Save_Patch + '/' + 'best.pth')
                best_miou = val_miou
            # `with` guarantees the record file is closed even if a write fails.
            with open(Record_File_Dir, 'a') as record_file:
                record_file.write(
                    'epoch--' + str(epoch + 1) + '--train_loss--' + str(train_loss) + '--iou--' + str(
                        val_iou) + '--miou--' + str(val_miou))
                record_file.write('\n')
            torch.cuda.empty_cache()

            # Late-stage sanity check: IoU on the training split.
            if epoch % (val_epoch * 2) == 0 and epoch > 30:
                torch.cuda.empty_cache()
                train_iou = val_fun(model, val_data_loader2)
                print('mean train iou', np.nanmean(train_iou))
                with open(Record_File_Dir, 'a') as record_file:
                    record_file.write('epoch--' + str(epoch + 1) + '--train_iou--' + str(train_iou) + '--train_miou--' + str(np.nanmean(train_iou)))
                    record_file.write('\n')
                torch.cuda.empty_cache()
        scheduler.step()



if __name__ == '__main__':
    # Let cuDNN benchmark convolution algorithms (fixed input sizes).
    torch.backends.cudnn.benchmark = True
    # model = model_resnet_unet_GN(layer='resnet101', use_aspp=True, num_classes=10, pre_train=False)
    model = model_deeplab_v3_resnet(num_class=10).cuda()

    # Resume from the best checkpoint only when one exists; the original
    # loaded it unconditionally, so a fresh run with no prior checkpoint
    # crashed with FileNotFoundError.
    checkpoint_path = Model_Save_Patch + '/best.pth'
    if os.path.exists(checkpoint_path):
        model.load_state_dict(torch.load(checkpoint_path))

    train(model)