import copy
import logging
import os
import random
import time
from glob import glob

import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from PIL import Image
from pytorch_toolbelt import losses as L
from torch.autograd import Variable
from torch.cuda.amp import GradScaler, autocast  # need pytorch>1.6
from torch.optim.lr_scheduler import StepLR
from torch.optim.swa_utils import SWALR, AveragedModel
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from torchvision.transforms import functional
from tqdm import tqdm
from utils.metric import IOUMetric
from utils.utils import AverageMeter, inial_logger, second2time

# Disable PIL's decompression-bomb guard: the imagery tiles used here exceed
# the default pixel limit, so raise it to an effectively unlimited value.
Image.MAX_IMAGE_PIXELS = 1000000000000000
# Module-wide compute device; the script assumes a CUDA GPU is available.
device = 'cuda'

def swa_update_bn(loader, swa_model, device='cuda'):
    """Recompute the BatchNorm running statistics of ``swa_model`` over ``loader``.

    Mirrors ``torch.optim.swa_utils.update_bn`` but unpacks this project's
    dict-style batches (``{'image': ..., 'label': ...}``; labels are ignored).
    The original version only ran forward passes and never reset the BN
    buffers nor forced train mode, so the averaged model's statistics were
    never actually refreshed.

    Args:
        loader: iterable of batch dicts with an ``'image'`` tensor.
        swa_model: the ``AveragedModel`` whose BN buffers are refreshed.
        device: device the batches are moved to (default ``'cuda'``, matching
            the module-level constant, so existing callers are unaffected).
    """
    # Reset every BN layer's running stats and switch it to momentum=None,
    # i.e. a cumulative moving average over all batches seen below.
    momenta = {}
    for module in swa_model.modules():
        if isinstance(module, nn.modules.batchnorm._BatchNorm):
            module.reset_running_stats()
            momenta[module] = module.momentum
            module.momentum = None
    if not momenta:
        # No BN layers -> nothing to update.
        return

    was_training = swa_model.training
    swa_model.train()  # BN buffers are only updated in train mode
    with torch.no_grad():
        for batch_idx, batch_samples in tqdm(enumerate(loader), total=len(loader), desc='update bn'):
            data = batch_samples['image'].to(device).float()
            swa_model(data)

    # Restore the original momenta and train/eval mode.
    for module, momentum in momenta.items():
        module.momentum = momentum
    swa_model.train(was_training)


# def smooth(v, w=0.85):
#     last = v[0]
#     smoothed = []
#     for point in v:
#         smoothed_val = last * w + (1 - w) * point
#         smoothed.append(smoothed_val)
#         last = smoothed_val
#     return smoothed

def train_net(cfg, model, train_data, valid_data, device='cuda'):
    """Train a segmentation ``model`` with AMP, cosine-restart LR and SWA.

    Args:
        cfg: project config node; reads ``cfg.SYSTEM``, ``cfg.TRAIN``,
            ``cfg.TRAIN.LR_SCHEDULER`` and ``cfg.TRAIN.SWA`` (schema assumed
            from the attribute accesses below — confirm against the config
            definition).
        model: network already on ``device``; its outputs are argmax-ed over
            dim 1, so a (N, C, H, W) class-logit map is expected.
        train_data: Dataset yielding dicts with 'image' and 'label'.
        valid_data: Dataset with the same item layout, used for validation.
        device: compute device string; default 'cuda'.

    Returns:
        (best_model, swa_model): a deep copy of the weights with the best
        validation mIoU, and the stochastic-weight-averaged model (its BN
        statistics are refreshed on the validation loader at the end).
    """
    sys_cfg = cfg.SYSTEM
    train_cfg = cfg.TRAIN
    lr_scheduler_cfg = cfg.TRAIN.LR_SCHEDULER
    swa_cfg = cfg.TRAIN.SWA

    # Gradient scaler for mixed-precision training (requires PyTorch > 1.6).
    scaler = GradScaler()

    # Data loaders / training parameters
    train_loader = DataLoader(
        dataset=train_data, batch_size=train_cfg.BATCH_SIZE, shuffle=True, num_workers=sys_cfg.NUM_WORKERS)
    valid_loader = DataLoader(
        dataset=valid_data, batch_size=train_cfg.BATCH_SIZE, shuffle=False, num_workers=sys_cfg.NUM_WORKERS)

    optimizer = optim.AdamW(model.parameters(), lr=train_cfg.LEARNING_RATE,
                           weight_decay=train_cfg.SOLVER.WEIGHT_DECAY)
    # optimizer = optim.SGD(model.parameters(), lr=lr,
    #                       momentum=train_cfg.SOLVER.MOMENTUM, weight_decay=weight_decay)

    # Cosine annealing with warm restarts; stepped per-iteration below with a
    # fractional epoch so the schedule is smooth within an epoch.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=lr_scheduler_cfg.T_STRAT, T_mult=lr_scheduler_cfg.T_MULT, eta_min=1e-5, last_epoch=lr_scheduler_cfg.LAST_EPOCH)

    # SWA: running weight average plus a constant-LR scheduler that takes over
    # once epoch >= swa_cfg.SWA_START.
    swa_model = AveragedModel(model)
    swa_scheduler = SWALR(optimizer, swa_lr=swa_cfg.SWA_LR_RATE * train_cfg.LEARNING_RATE)

    DiceLoss_fn = L.DiceLoss(mode='multiclass')  # joint loss: Dice + label-smoothed CE
    SoftCrossEntropy_fn = L.SoftCrossEntropyLoss(smooth_factor=0.1)
    criterion = L.JointLoss(first=DiceLoss_fn, second=SoftCrossEntropy_fn, first_weight=0.5, second_weight=0.5).cuda()

    # Run identifier used for both the log file and the TensorBoard run dir.
    time_with_model = train_cfg.MODEL_NAME + '_' + time.strftime("%m-%d-%H-%M-%S", time.localtime())

    logger = inial_logger(os.path.join(train_cfg.LOG_PATH, time_with_model + '.log'))
    writer = SummaryWriter(log_dir=f'./runs/{time_with_model}')

    # Probe one sample to report the (H, W) image size in the log header.
    _, y, x = train_data.__getitem__(0)['image'].shape
    logger.info('Total Epoch:{} Image_size:({}, {}) Training num:{}  Validation num:{}'.format(
        train_cfg.EPOCHS, x, y, train_data.__len__(), valid_data.__len__()))

    # Main loop
    train_loss_total_epochs, valid_loss_total_epochs, epoch_lr = [], [], []
    train_loader_size = train_loader.__len__()
    valid_loader_size = valid_loader.__len__()
    best_iou = 0
    best_epoch = 0
    best_model = copy.deepcopy(model)
    epoch_start = 0

    # TODO: resume training from a checkpoint
    # if train_cfg.LOAD_CKPT_PATH is not None:  # resume training
    #     ckpt = torch.load(train_cfg.LOAD_CKPT_PATH)
    #     epoch_start = ckpt['epoch'] + 1 
    #     model.load_state_dict(ckpt['state_dict'])
    #     optimizer.load_state_dict(ckpt['optimizer'])

    for epoch in range(epoch_start, train_cfg.EPOCHS):
        # NOTE(review): this reuses (shadows) the start-epoch counter as a
        # wall-clock timer; harmless since range() was already evaluated,
        # but a distinct name would be clearer.
        epoch_start = time.time()
        # ---- training phase ----
        model.train()
        train_epoch_loss = AverageMeter()
        train_iter_loss = AverageMeter()
        for batch_idx, batch_samples in enumerate(train_loader):
            # NOTE(review): created every batch but never used in the
            # training loop — dead code candidate.
            train_iou = IOUMetric(10)
            data, target = batch_samples['image'], batch_samples['label']
            # NOTE(review): Variable is a no-op wrapper in modern PyTorch.
            data, target = Variable(
                data.to(device).float()), Variable(target.to(device))

            with autocast():
                # forward (mixed precision)
                pred = model(data)
                loss = criterion(pred, target)

                # backward
                # NOTE(review): scaled backward / optimizer step run inside
                # the autocast context; works, but the docs recommend placing
                # them outside it — confirm before refactoring.
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()

                # Per-iteration LR schedule: SWALR after SWA_START, otherwise
                # cosine warm restarts stepped with a fractional epoch.
                if epoch >= swa_cfg.SWA_START:
                    swa_scheduler.step()
                else:
                    scheduler.step(epoch + batch_idx / train_loader_size)

            image_loss = loss.item()
            train_epoch_loss.update(image_loss)
            train_iter_loss.update(image_loss)
            # Log every ITER_EOPCH iterations (config key spelled as in project).
            if batch_idx % train_cfg.ITER_EOPCH == 0:
                spend_time = time.time() - epoch_start

                # tensorboard
                writer.add_scalar('Loss/train', train_iter_loss.avg, epoch * train_loader_size + batch_idx)
                writer.add_scalar('Lr', optimizer.param_groups[-1]['lr'], epoch * train_loader_size + batch_idx)

                # ETA extrapolated from the average per-iteration time so far.
                logger.info('[train] epoch:{} iter:{}/{} {:.2f}% lr:{:.6f} loss:{:.6f} ETA:{}min'.format(
                    epoch, batch_idx, train_loader_size, batch_idx/train_loader_size*100,
                    optimizer.param_groups[-1]['lr'],
                    train_iter_loss.avg, spend_time / (batch_idx+1) * train_loader_size // 60 - spend_time // 60))

                train_iter_loss.reset()

        # Accumulate the running weight average once per epoch after SWA_START.
        if epoch >= swa_cfg.SWA_START:
            swa_model.update_parameters(model)

        # ---- validation phase ----
        model.eval()
        valid_epoch_loss = AverageMeter()
        valid_iter_loss = AverageMeter()
        val_iou = IOUMetric(10)
        with torch.no_grad():
            
            for batch_idx, batch_samples in tqdm(enumerate(valid_loader), total=len(valid_loader)):
                data, target = batch_samples['image'], batch_samples['label']
                data, target = Variable(
                    data.to(device).float()), Variable(target.to(device))

                # Evaluate the averaged model once SWA has begun, otherwise
                # the raw model.
                if epoch >= swa_cfg.SWA_START:
                    pred = swa_model(data)
                else:
                    pred = model(data)

                loss = criterion(pred, target)
                # argmax over the class channel -> per-pixel class map.
                pred = pred.cpu().data.numpy()
                pred = np.argmax(pred, axis=1)
                val_iou.add_batch(pred, target.cpu().data.numpy())
                #
                image_loss = loss.item()
                valid_epoch_loss.update(image_loss)
                valid_iter_loss.update(image_loss)
                
            val_loss = valid_iter_loss.avg
            acc, acc_cls, iu, mean_iu, fwavacc = val_iou.evaluate()

            writer.add_scalar('Val/Loss', val_loss, epoch * train_loader_size)
            writer.add_scalar('Val/Acc', acc, epoch * train_loader_size)
            writer.add_scalar('Val/Acc_cls', acc_cls, epoch * train_loader_size)
            writer.add_scalar('Val/mIOU', mean_iu, epoch * train_loader_size)

            logger.info('[val] epoch:{} miou:{:.2f}'.format(epoch, mean_iu))

        # Record per-epoch loss / lr history.
        train_loss_total_epochs.append(train_epoch_loss.avg)
        valid_loss_total_epochs.append(valid_epoch_loss.avg)
        epoch_lr.append(optimizer.param_groups[0]['lr'])
        # Save the latest checkpoint.
        state = {'epoch': epoch, 'state_dict': model.state_dict(),
                     'optimizer': optimizer.state_dict()}

        # Only start saving after MIN_EOPCH epochs (config key spelled as in project).
        if epoch > train_cfg.MIN_EOPCH:
            filename = os.path.join(train_cfg.SAVE_CKPT_PATH, 'checkpoint-latest.pth')
            torch.save(state, filename)  # PyTorch >= 1.6 zip-compresses checkpoints; older versions cannot load them
            logger.info(
                    '[save] Latest Model saved at epoch:{} <========================3'.format(epoch))

        # Save the best (highest validation mIoU) checkpoint.
        if mean_iu > best_iou:  # train_loss_per_epoch valid_loss_per_epoch
            if epoch >= swa_cfg.SWA_START:
                state = {'epoch': epoch, 'state_dict': swa_model.state_dict(),
                         'optimizer': optimizer.state_dict()}
                model_type = 'swa_model'
            else:
                state = {'epoch': epoch, 'state_dict': model.state_dict(),
                         'optimizer': optimizer.state_dict()}
                model_type = 'model'
            filename = os.path.join(train_cfg.SAVE_CKPT_PATH, 'checkpoint-best.pth')
            torch.save(state, filename)
            best_iou = mean_iu
            # NOTE(review): best_model copies the raw model even when the SWA
            # weights produced the best mIoU — confirm this is intended.
            best_model = copy.deepcopy(model)
            logger.info(
                '[save] Best {} saved at epoch:{} \/\/\/\/\/\/\/\/\/\/\/\/'.format(model_type, epoch))

        # scheduler.step()
    
    # SWA
    # Update bn statistics for the swa_model at the end
    # Call update_bn one time before eval
    # NOTE(review): BN statistics are refreshed on the *validation* loader.
    swa_update_bn(valid_loader, swa_model)
    state = {'epoch':  train_cfg.EPOCHS, 'state_dict': swa_model.state_dict(),
                     'optimizer': optimizer.state_dict()}
    filename = os.path.join(train_cfg.SAVE_CKPT_PATH, 'checkpoint-final.pth')
    torch.save(state, filename)
    logger.info('[save] Final Model saved +++++++++++++++++++++++++')

    return best_model, swa_model
