#!/usr/bin/env python
# -*- coding: utf-8 -*-
#  @Time    : 2021-02-09 0:07
#  @Author  : lifan
#  @File    : train.py
#  @Software: PyCharm
# @Brief   : UNet training module (Han Yingying)


import sys

sys.path.append("..")
import torch
from torch import nn
import numpy as np
# from torch.cuda.amp import autocast, GradScaler
import datetime
import os
import random
import time
from dataloader.dataset import MyDataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from prefetch_generator import BackgroundGenerator
from model.unet_model import UNet
from loss.loss import *
import multiprocessing
from optim.opt import RAdam
from metric.metric import *
import matplotlib.pyplot as plt
from ternausnet.models import UNet16

# Global training configuration.
CFG = {
    'fold_num': 1,          # which cross-validation fold to train
    'seed': 1,              # RNG seed for reproducibility
    'epochs': 500,          # total training epochs
    'images_size': 224,     # square input resolution fed to the network
    'train_bs': 16,         # training batch size
    'valid_bs': 10,         # validation batch size
    'T_0': 10,              # CosineAnnealingWarmRestarts restart period (epochs)
    'lr': 5e-4,             # initial learning rate
    'min_lr': 1e-7,         # eta_min floor for the cosine schedule
    'weight_decay': 1e-4,
    'num_workers': multiprocessing.cpu_count(),
    'accum_iter': 2,  # support to do batch accumulation for backprop with effectively larger batch size
    # NOTE(review): accum_iter appears unused in train() below — confirm intent.
    'verbose_step': 1,      # refresh the progress-bar loss readout every N steps
    'device': 'cuda:0',
    'best_iou': float("-inf"),  # running best validation IoU; mutated by train()
}

# config
# Order GPUs by PCI bus id so device indices match nvidia-smi output.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# Expose four GPUs to this process (training itself uses CFG['device'] only).
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
device = torch.device(CFG['device'])


# Fixed seed
def set_seed(seed):
    """Seed every RNG source (Python, hash, NumPy, PyTorch CPU/GPU) for reproducible runs.

    Args:
        seed (int): seed value applied to all generators.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # seeds every CUDA device, including the current one
    # benchmark must be False for reproducibility: with benchmark=True cuDNN
    # autotunes kernels per input shape and may pick non-deterministic algorithms,
    # defeating the deterministic flag below.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True


def plot(scores, name, save_path):
    """Plot the train/val curves of one metric and save the figure to disk.

    Args:
        scores (dict): {"train": [...], "val": [...]} per-epoch metric values.
        name (str): metric name used in the title, labels and legend.
        save_path (str): destination image file path.
    """
    plt.figure(figsize=(15, 5))
    plt.plot(range(len(scores["train"])), scores["train"], label=f'train {name}')
    # The x-range must follow the val series itself: plotting it against the
    # train length raises/misaligns whenever val has fewer entries (e.g. if
    # validation runs at an interval greater than one epoch).
    plt.plot(range(len(scores["val"])), scores["val"], label=f'val {name}')
    plt.title(f'{name} plot')
    plt.xlabel('Epoch')
    plt.ylabel(f'{name}')
    plt.legend()
    plt.savefig(save_path)
    plt.close()  # release the figure; repeated calls otherwise accumulate open figures


def train(train_loader, val_loader, num_epochs, fold_num):
    """Train the segmentation network, validating after every epoch.

    Builds a pretrained UNet16, optimizes it with RAdam under a cosine
    warm-restart schedule using MixedLoss, and checkpoints the weights to
    ./weights/ whenever validation IoU improves on CFG['best_iou'].

    Args:
        train_loader: DataLoader yielding (images, masks) training batches.
        val_loader: DataLoader yielding (images, masks) validation batches.
        num_epochs (int): total number of epochs to run.
        fold_num (int): fold index, used only to name the checkpoint file.

    Returns:
        tuple: (losses, dice_scores, iou_scores) — each a dict holding
        per-epoch "train" and "val" lists.
    """
    val_interval = 1  # run validation every `val_interval` epochs
    phases = ["train", "val"]
    losses = {phase: [] for phase in phases}
    iou_scores = {phase: [] for phase in phases}
    dice_scores = {phase: [] for phase in phases}
    # Combined segmentation loss from loss/loss.py — presumably focal/dice
    # weighted by (10.0, 2.0); verify against its definition.
    loss_mixedloss = MixedLoss(10.0, 2.0)
    net = UNet16(num_classes=1, num_filters=32, pretrained=True).to(device)
    optimizer = RAdam(net.parameters(), lr=CFG['lr'], weight_decay=CFG['weight_decay'])
    # Cosine annealing with a warm restart every T_0 epochs, decaying to min_lr.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=CFG['T_0'], T_mult=1,
                                                                     eta_min=CFG['min_lr'], last_epoch=-1)
    # NOTE(review): CFG['accum_iter'] is configured but no gradient accumulation
    # happens below (optimizer steps every batch) — confirm whether it was intended.
    os.makedirs("./weights", exist_ok=True)
    save_path = "./weights/unet_weights_%d.pth" % (fold_num)
    print("Start Training...")
    nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print("==========" * 8 + "%s" % nowtime)

    for epoch in range(num_epochs):

        # ---- training phase ----
        net.train()
        running_loss = 0.0
        phase = "train"
        start = time.strftime("%H:%M:%S")
        meter = Meter(phase, epoch)
        t1 = time.perf_counter()
        pbar = tqdm(enumerate(train_loader), total=len(train_loader))

        # BackgroundGenerator prefetches the next batch on a worker thread.
        for step, data in BackgroundGenerator(pbar):
            images, labels = data
            assert images.size()[2:] == labels.size()[2:]
            images, labels = images.to(device), labels.to(device)

            outputs = net(images)
            loss = loss_mixedloss(outputs, labels)
            loss.backward()
            running_loss += loss.item()
            optimizer.step()
            optimizer.zero_grad()

            if ((step + 1) % CFG['verbose_step'] == 0) or ((step + 1) == len(train_loader)):
                pbar.set_description('Train loss: %.4f' % (running_loss / (step + 1)))

            # Move predictions/targets to CPU so Meter can accumulate epoch metrics.
            labels = labels.cpu()
            outputs = outputs.detach().cpu()
            meter.update(labels, outputs)

        epoch_loss = running_loss / (step + 1)
        dice, iou = epoch_log(phase, epoch, epoch_loss, meter, start)

        losses[phase].append(epoch_loss)
        dice_scores[phase].append(dice)
        iou_scores[phase].append(iou)
        torch.cuda.empty_cache()

        # Console logging: report the LR used this epoch, then advance the schedule.
        if scheduler is not None:
            print("[epoch %d] learning rate %f" % (epoch + 1, optimizer.param_groups[0]["lr"]))
            scheduler.step()

        print("[epoch %d] time consumed %.2f" % (epoch + 1, time.perf_counter() - t1))

        # ---- validation phase ----
        if (epoch + 1) % val_interval == 0:

            net.eval()
            phase = "val"
            meter = Meter(phase, epoch)
            start = time.strftime("%H:%M:%S")
            running_loss = 0.0
            pbar = tqdm(enumerate(val_loader), total=len(val_loader))
            with torch.no_grad():

                for step, test_data in BackgroundGenerator(pbar):
                    val_images, val_labels = test_data
                    val_images, val_labels = val_images.to(device), val_labels.to(device)
                    outputs = net(val_images)
                    loss = loss_mixedloss(outputs, val_labels)
                    running_loss += loss.item()
                    pbar.set_description('Test loss: %.4f' % (running_loss / (step + 1)))

                    outputs = outputs.detach().cpu()
                    val_labels = val_labels.cpu()
                    meter.update(val_labels, outputs)

                epoch_loss = running_loss / (step + 1)
                dice, iou = epoch_log(phase, epoch, epoch_loss, meter, start)
                losses[phase].append(epoch_loss)
                dice_scores[phase].append(dice)
                iou_scores[phase].append(iou)
                torch.cuda.empty_cache()

                # Checkpoint only on a new best validation IoU (tracked globally in CFG).
                if iou > CFG['best_iou']:
                    CFG['best_iou'] = iou
                    torch.save(net.state_dict(), save_path)
                    print('[epoch %d], save model!!' % (epoch + 1))

                nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                print("\n" + "==========" * 8 + "⏰ %s" % nowtime)
            torch.cuda.empty_cache()

    return losses, dice_scores, iou_scores


if __name__ == '__main__':
    print(time.asctime(time.localtime(time.time())), "Start Training!!!")

    set_seed(CFG['seed'])
    train_dataset = MyDataset(fold=CFG['fold_num'], mode="train", images_size=CFG['images_size'])
    train_dataloader = DataLoader(train_dataset, batch_size=CFG['train_bs'], shuffle=True,
                                  num_workers=CFG['num_workers'], pin_memory=torch.cuda.is_available())

    val_datasets = MyDataset(fold=CFG['fold_num'], mode="val", images_size=CFG['images_size'])
    val_dataloader = DataLoader(val_datasets, batch_size=CFG['valid_bs'], shuffle=False,
                                num_workers=CFG['num_workers'], pin_memory=torch.cuda.is_available())
    losses, dice_scores, iou_scores = train(train_dataloader, val_dataloader, CFG['epochs'], CFG['fold_num'])

    # Write this fold's per-epoch loss / dice / IoU histories to txt files.
    os.makedirs("./10_folds_score_result", exist_ok=True)

    # Mode 'w' truncates on open, so no separate clear-the-file pass is needed.
    with open('./10_folds_score_result/train_fold_{}.txt'.format(CFG['fold_num']), 'w') as f:
        f.write('losses: {},\ndice_scores: {},\niou_scores: {}\n'.format(losses["train"], dice_scores["train"],
                                                                         iou_scores["train"]))

    with open('./10_folds_score_result/val_fold_{}.txt'.format(CFG['fold_num']), 'w') as f:
        f.write('losses: {},\ndice_scores: {},\niou_scores: {}\n'.format(losses["val"], dice_scores["val"],
                                                                         iou_scores["val"]))

    # Save train/val curves for each tracked metric.
    plot(losses, "BCE loss", "./10_folds_score_result/bce_loss_fold_{}.png".format(CFG['fold_num']))
    plot(dice_scores, "Dice score", "./10_folds_score_result/dice_scores_fold_{}.png".format(CFG['fold_num']))
    plot(iou_scores, "IoU score", "./10_folds_score_result/iou_scores_fold_{}.png".format(CFG['fold_num']))

    print(time.asctime(time.localtime(time.time())), "Finished Training!!!")
