#!/usr/bin/env python
# -*- coding: utf-8 -*-
#  @Time    : 2021-02-09 0:07
#  @Author  : lifan
#  @File    : train.py
#  @Software: PyCharm
# @Brief   :


import sys
sys.path.append(".")
import torch
from torch import nn
import pandas as pd
import numpy as np
from torch.cuda.amp import autocast, GradScaler
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from PIL import Image
import datetime
import glob
import os
import random
import time
from dataloader.dataset import MyDataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from prefetch_generator import BackgroundGenerator
from tensorboardX import SummaryWriter
from model.unetmodel import UNet
# from metrics import Evaluator
from Loss_fn_and_evaluate import *
import multiprocessing
from optim.opt import RAdam
import cv2
import segmentation_models_pytorch.segmentation_models_pytorch as smp
from metric.metric import *


# Training configuration shared across the script.
# NOTE: 'best_pred' is mutated at runtime — train() overwrites it with the
# best validation dice seen so far.
CFG = {
    'fold_num': 1,
    'seed': 1,
    'epochs': 50,
    'images_size': 256,
    'train_bs': 32,
    'valid_bs': 32,
    'T_0': 10,  # restart period (epochs) for CosineAnnealingWarmRestarts
    'lr': 5e-2,
    'min_lr': 1e-6,
    'weight_decay': 1e-6,
    'num_workers': multiprocessing.cpu_count(),
    'accum_iter': 2,  # support batch accumulation for backprop with effectively larger batch size
    'verbose_step': 1,
    'device': 'cuda:1',
    'best_pred': 0.0,
}

# config
# NOTE(review): CUDA_VISIBLE_DEVICES is set after `import torch`; this only
# takes effect if CUDA has not been initialized yet — confirm ordering.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
device = torch.device(CFG['device'])
writer = SummaryWriter(comment='train_loss/val_loss')


# Fixed seed
def set_seed(seed):
    """Seed every RNG used in training (python, hash, numpy, torch/CUDA).

    Args:
        seed (int): value applied to all random number generators.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # benchmark=True lets cuDNN auto-tune convolution kernels, which is
    # non-deterministic and defeats deterministic=True below — it must be
    # off when fixing seeds for reproducibility.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True


def train(train_loader, val_loader, num_epochs, fold_num):
    """Train a UNet for one fold with AMP + gradient accumulation, validating each epoch.

    The best model (by validation dice) is checkpointed to
    ./weights/unet3d_<fold_num>.pth; CFG['best_pred'] tracks that best score.

    Args:
        train_loader: DataLoader yielding (images, labels) training batches.
        val_loader: DataLoader yielding (images, labels) validation batches.
        num_epochs (int): number of epochs to run.
        fold_num (int): fold index, used to name the checkpoint file.

    Returns:
        (train_dice, val_dice): lists of the per-epoch mean dice scores.
    """
    scaler = GradScaler()
    val_interval = 1  # validate every epoch
    dice_ = DiceScore()
    train_dice, val_dice = [], []
    loss_fn = nn.BCEWithLogitsLoss().to(device)
    loss_mixedloss = MixedLoss(10.0, 2.0)
    net = UNet().to(device)
    optimizer = RAdam(net.parameters(), lr=CFG['lr'])
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=CFG['T_0'], T_mult=1, eta_min=CFG['min_lr'], last_epoch=-1)
    if os.path.exists("./weights") is False:
        os.mkdir("./weights")
    save_path = "./weights/unet3d_%d.pth" % (fold_num)
    print("Start Training...")
    nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print("==========" * 8 + "%s" % nowtime)

    for epoch in range(num_epochs):

        # ---- train ----
        net.train()
        running_loss = 0.0
        running_train_dice, running_val_dice = [], []
        t1 = time.perf_counter()  # epoch start time
        pbar = tqdm(enumerate(train_loader), total=len(train_loader))

        for step, data in BackgroundGenerator(pbar):
            images, labels = data
            assert images.size()[2:] == labels.size()[2:]
            images, labels = images.to(device), labels.to(device)

            # Forward pass only under autocast; per the AMP docs, backward()
            # must run OUTSIDE the autocast context.
            with autocast():
                image_preds = net(images)
                loss = loss_mixedloss(image_preds, labels)

            # Divide by the accumulation factor so the accumulated gradient
            # matches one large-batch update (otherwise it is accum_iter x
            # too large).
            scaler.scale(loss / CFG['accum_iter']).backward()

            running_loss += loss.item()
            dice_trn = dice_(image_preds, labels)

            # Step the optimizer every accum_iter batches (and on the last
            # batch, so no gradient is left un-applied).
            if ((step + 1) % CFG['accum_iter'] == 0) or ((step + 1) == len(train_loader)):
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()

            if ((step + 1) % CFG['verbose_step'] == 0) or ((step + 1) == len(train_loader)):
                pbar.set_description('Train loss: %.4f' % (running_loss / (step + 1)))

            running_train_dice.append(dice_trn.item())

        epoch_train_dice = np.mean(running_train_dice)
        train_dice.append(epoch_train_dice)  # per-epoch mean train dice

        # console logging
        if scheduler is not None:
            print("[epoch %d] learning rate %f" % (epoch + 1, optimizer.param_groups[0]["lr"]))
            # CosineAnnealingWarmRestarts.step() takes an (optional) epoch
            # index, not a metric; passing a loss value here silently
            # corrupts the schedule.
            scheduler.step()

        print("[epoch %d] time consumed %.2f" % (epoch + 1, time.perf_counter() - t1))
        print("[epoch %d] running_loss: %.4f" % (epoch + 1, running_loss))
        print("[epoch %d] running_dice_score: %.4f" % (epoch + 1, epoch_train_dice))
        # mean train loss; was `running_loss / step + 1` (precedence bug)
        writer.add_scalar("train loss", running_loss / (step + 1), epoch)

        # ---- validate ----
        if (epoch + 1) % val_interval == 0:

            net.eval()
            test_loss = 0.0
            pbar = tqdm(enumerate(val_loader), total=len(val_loader))
            with torch.no_grad():

                for step, test_data in BackgroundGenerator(pbar):
                    val_images, val_labels = test_data
                    val_images, val_labels = val_images.to(device), val_labels.to(device)
                    outputs = net(val_images)
                    loss = loss_fn(outputs, val_labels)
                    test_loss += loss.item()
                    pbar.set_description('Test loss: %.4f' % (test_loss / (step + 1)))
                    dice_val = dice_(outputs, val_labels)
                    running_val_dice.append(dice_val.item())

                epoch_val_dice = np.mean(running_val_dice)
                val_dice.append(epoch_val_dice)

                print('Loss: %.3f' % test_loss)
                print('Dice Score: %.4f' % (epoch_val_dice))

                # keep only the best checkpoint by validation dice
                new_pred = epoch_val_dice
                if new_pred > CFG['best_pred']:
                    CFG['best_pred'] = new_pred
                    torch.save(net.state_dict(), save_path)
                    print('[epoch %d], save model!!' % (epoch+1))

                nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                print("\n" + "==========" * 8 + "%s" % nowtime)
            torch.cuda.empty_cache()

    return train_dice, val_dice


if __name__ == '__main__':
    print(time.asctime(time.localtime(time.time())), "Start Training!!!")

    # Fix all RNG seeds before any data loading / model init.
    set_seed(CFG['seed'])
    train_dataset = MyDataset(fold=CFG['fold_num'], mode="train", images_size=CFG['images_size'])
    train_dataloader = DataLoader(train_dataset, batch_size=CFG['train_bs'], shuffle=True,
                                  num_workers=CFG['num_workers'], pin_memory=torch.cuda.is_available(), drop_last=True)

    val_datasets = MyDataset(fold=CFG['fold_num'], mode="val", images_size=CFG['images_size'])
    val_dataloader = DataLoader(val_datasets, batch_size=CFG['valid_bs'], shuffle=False,
                                num_workers=CFG['num_workers'], pin_memory=torch.cuda.is_available(), drop_last=True)
    train_dice, val_dice = train(train_dataloader, val_dataloader, CFG['epochs'], CFG['fold_num'])
    print("Fold %d, val mean dice score %f, best dict %f" % (CFG['fold_num'], np.mean(val_dice), CFG['best_pred']))

    # Write this fold's dice results to a txt file.
    # makedirs(exist_ok=True) replaces the exists()-then-mkdir pattern
    # (idiomatic and race-free).
    os.makedirs("./dice_score_result", exist_ok=True)
    with open('./dice_score_result/fold_{}.txt'.format(CFG['fold_num']), 'w') as f:
        f.write('mean_dice: {},\nbest_dice: {}\n'.format(np.mean(val_dice), CFG['best_pred']))

    print(time.asctime(time.localtime(time.time())), "Finished Training!!!")
