# coding: utf-8
"""Run training."""

from pathlib import Path
import shutil
import os
import time
import typing
import datetime
import logging
import numpy as np

import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.backends.cudnn as cudnn
import torch.nn.parallel
import torchvision

from .dataset import CoviarDataSet
from .model import CoviarLstm, Coviar, CrossEntropyLabelSmoothLoss, CoviarConvLstm
from .transforms import GroupCenterCrop
from .transforms import GroupScale
from . import getLogger
SAVE_FREQ = 40  # save a periodic checkpoint every SAVE_FREQ epochs
PRINT_FREQ = 20  # log training/validation progress every PRINT_FREQ batches
best_prec1 = 0  # best top-1 validation accuracy seen so far (updated in main)
log: logging.Logger  # module-level logger, initialised at the top of main()


def static_vars(**kwargs):
    """Decorator that attaches the given keyword arguments as attributes
    ("static variables") on the decorated function, which is returned
    unchanged otherwise."""
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate


def main(cfg):
    """Build the model and data loaders, then train with periodic validation.

    `cfg` is a configuration namespace (gpu, version, weights, batch_size,
    lr, epochs, ...) produced by the caller; `log` and `best_prec1` are
    module-level globals updated here.
    """
    global log
    log = getLogger(cfg)
    os.environ["CUDA_VISIBLE_DEVICES"] = cfg.gpu
    global best_prec1

    start_tim = datetime.datetime.now()
    log.info(f"Start time: {start_tim.strftime('%Y-%m-%d %H:%M:%S')}")
    # log.info('GPU cards: %d Titan Xp' % (len(args.gpus)))
    log.info('GPU cards: %d Titan Xp' % (torch.cuda.device_count()))

    log.info('Training arguments:')
    for k, v in vars(cfg).items():
        if k[0] != '_':  # skip private/internal attributes
            log.info('\t{}: {}'.format(k, v))

    num_class = cfg.num_class  # NOTE(review): unused below; kept as-is

    # NOTE(review): an unrecognised cfg.version leaves `model` unbound and
    # the first use below raises NameError — confirm cfg is validated upstream.
    if cfg.version == 'coviar-lstm':
        model = CoviarLstm(cfg)
    elif cfg.version == 'coviar':
        model = Coviar(cfg)
    elif cfg.version == 'coviar-convlstm':
        model = CoviarConvLstm(cfg)

    if cfg.weights:
        # Warm-start from a checkpoint: drop the final fc layer and strip the
        # first dotted component of every key (presumably the 'module.' prefix
        # added by DataParallel — verify against how the checkpoint was saved).
        model_dict = model.state_dict()
        checkpoint = torch.load(cfg.weights)
        log.info(
            f"model epoch {checkpoint['epoch']} "
            f"best prec@1: {checkpoint['best_prec1']}")
        base_dict = {'.'.join(k.split('.')[1:]): v
                     for k, v in list(checkpoint['state_dict'].items())
                     if 'base_model.fc' not in k}  # TODO: list() is unnecessary
        model_dict.update(base_dict)

        # for k in base_dict.keys():
        #     log.info(k)
        model.load_state_dict(model_dict)

    if cfg.original:
        from .dataset_ext import OriginalDataSet
        train_loader = torch.utils.data.DataLoader(
            OriginalDataSet(
                cfg,
                video_list=None,
                transform=model.get_augmentation(),
            ),
            batch_size=cfg.batch_size, shuffle=True,
            num_workers=cfg.workers, pin_memory=True)

        import copy
        # Deep-copy cfg so switching to validation mode does not mutate the
        # configuration used by the training loader.
        val_cfg = copy.deepcopy(cfg)
        val_cfg.mode = 'validation'
        val_loader = torch.utils.data.DataLoader(
            OriginalDataSet(
                val_cfg,
                video_list=None,
                transform=torchvision.transforms.Compose([
                    GroupScale(int(model.scale_size)),
                    GroupCenterCrop(model.crop_size),
                ]),
            ),
            batch_size=cfg.batch_size, shuffle=False,
            num_workers=cfg.workers, pin_memory=True)
    else:
        train_loader = torch.utils.data.DataLoader(
            CoviarDataSet(
                cfg,
                video_list=cfg.train_list,
                transform=model.get_augmentation(),
            ),
            batch_size=cfg.batch_size, shuffle=True,  # data reshuffled at every epoch
            num_workers=cfg.workers, pin_memory=True)  # page-locked (pinned) memory

        val_loader = torch.utils.data.DataLoader(
            CoviarDataSet(
                cfg,
                video_list=cfg.test_list,
                transform=torchvision.transforms.Compose([
                    GroupScale(int(model.scale_size)),
                    GroupCenterCrop(model.crop_size),
                ]),
            ),
            batch_size=cfg.batch_size, shuffle=False,
            num_workers=cfg.workers, pin_memory=True)

    # Two steps merged into one:
    #   1. DataParallel parallelizes the module by splitting the input across
    #      the visible devices, chunking along the batch dimension.
    #   2. .cuda() moves all model parameters and buffers to the GPU.
    # model = torch.nn.DataParallel(model, device_ids=args.gpus).cuda()
    model = torch.nn.DataParallel(model).cuda()
    # model: torch.nn.parallel.data_parallel.DataParallel
    # TODO: enabling this near program start reportedly speeds training up a
    # little (why?) — cuDNN benchmarks and caches the fastest conv algorithms.
    cudnn.benchmark = True

    # named_parameters() returns an iterator over the model's parameters,
    # yielding both the name of the parameter and the parameter itself:
    # (string, Parameter) tuples.
    params_dict = dict(model.named_parameters())
    params = []
    for key, value in params_dict.items():
        if cfg.freeze is True:  # TODO change the type of 'freeze' for extend
            # Freeze mode: only lstm/fc layers stay trainable.
            if 'lstm' in key or 'fc' in key:
                log.info(key)
                decay_mult = 0.0 if 'bias' in key else 1.0
                lr = cfg.lstm_lr if 'lstm' in key else cfg.lr
                # TODO set lr_mult
                params += [{'params': value, 'lr': lr,
                            'lr_mult': 1.0, 'decay_mult': decay_mult}]
            else:
                value.requires_grad = False
        else:
            # No weight decay on biases.
            decay_mult = 0.0 if 'bias' in key else 1.0

            if ('module.base_model.conv1' in key
                    or 'module.base_model.bn1' in key
                    or 'data_bn' in key) and cfg.representation in ['mv', 'residual']:
                lr_mult = 0.1
            elif '.fc.' in key or 'lstm' in key:  # matches the lstm lr_mult used when freezing
                lr_mult = 1.0
            else:
                lr_mult = 0.01
            lr = cfg.lstm_lr if 'lstm' in key else cfg.lr

            # log.info(f'name = {key:<48}, lr = {lr:<6}, lr_mult = {lr_mult:<6}, decay_mult = {decay_mult}')
            params += [{'params': value, 'lr': lr,
                        'lr_mult': lr_mult, 'decay_mult': decay_mult}]

    optimizer = torch.optim.Adam(
        params,
        weight_decay=cfg.weight_decay,
        eps=0.001)  # larger eps for numerical stability
    # classification criterion
    # criterion = torch.nn.CrossEntropyLoss().cuda()
    criterion = CrossEntropyLabelSmoothLoss(cfg.num_class).cuda()
    scheduler = ReduceLROnPlateau(optimizer, 'min', patience=10)

    for epoch in range(cfg.epochs):
        # cur_lr is only used for logging; adjust_learning_rate has already
        # updated the optimizer's param groups in place.
        cur_lr = adjust_learning_rate(cfg, optimizer, epoch)
        # cur_lr = get_lr(optimizer)
        train(cfg, train_loader, model, criterion, optimizer, epoch, cur_lr)

        # Time to evaluate, or the final epoch.
        if epoch % cfg.eval_freq == 0 or epoch == cfg.epochs - 1:
            with torch.no_grad():  # no gradient bookkeeping during eval: saves memory
                prec1 = validate(cfg, val_loader, model, criterion, scheduler)

            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            # A new best set of weights appeared, or the periodic save is due.
            if is_best or epoch % SAVE_FREQ == 0:
                save_checkpoint(
                    cfg,
                    {
                        'epoch': epoch + 1,
                        'arch': cfg.arch,
                        'state_dict': model.state_dict(),
                        'best_prec1': best_prec1,
                    },
                    is_best,
                    filename='checkpoint.pth.tar')

    end_tim = datetime.datetime.now()
    log.info(f"End time: {end_tim.strftime('%Y-%m-%d %H:%M:%S')}")
    total_tim = end_tim - start_tim  # timedelta
    log.info(f'Total time: {total_tim}')


def train(cfg, train_loader, model, criterion, optimizer, epoch, cur_lr):
    """Run one training epoch.

    Args:
        cfg: configuration namespace (uses cfg.num_segments).
        train_loader: DataLoader yielding (input, target) batches.
        model: DataParallel-wrapped network already on GPU.
        criterion: classification loss.
        optimizer: optimizer built in main().
        epoch: current epoch index (logging only).
        cur_lr: current base learning rate (logging only).
    """
    global log
    batch_time = AverageMeter()  # wall time per batch
    data_time = AverageMeter()   # time spent waiting on the data loader
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # Put the module into training mode (dropout / batch-norm behaviour).
    model.train()

    end = time.time()

    for i, (inputs, target) in enumerate(train_loader):
        # inputs: (batchsize, k, 2 or 3, 224, 224); target: (batchsize,)
        data_time.update(time.time() - end)

        # BUG FIX: the original `target.cuda(async=True)` is a SyntaxError on
        # Python 3.7+ (`async` became a reserved keyword); `non_blocking` is
        # the replacement spelling for an asynchronous host->device copy.
        target = target.cuda(non_blocking=True)
        # torch.autograd.Variable is deprecated since PyTorch 0.4; tensors
        # participate in autograd directly, so no wrapping is needed.

        output = model(inputs)
        # Average the per-segment scores:
        # (batch*k, C) -> (batch, k, C) -> mean over k -> (batch, C)
        output = output.view((-1, cfg.num_segments) + output.size()[1:])
        output = torch.mean(output, dim=1)

        loss = criterion(output, target)

        # detach() so accuracy bookkeeping stays out of the autograd graph.
        prec1, prec5 = accuracy(output.detach(), target, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        # Gradients accumulate across backward() calls, so clear them before
        # each backward pass, then take one optimization step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        if i % PRINT_FREQ == 0:
            log.info((
                'Epoch: [{0}][{1}/{2}], lr: {lr:.7f}\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                    epoch, i+1, len(train_loader),
                    batch_time=batch_time,
                    data_time=data_time,
                    loss=losses,
                    top1=top1,
                    top5=top5,
                    lr=cur_lr)))


# Periodic evaluation during training, used to pick the best-generalising weights.
def validate(cfg, val_loader, model, criterion, scheduler):
    """Evaluate the model on the validation set and return top-1 accuracy.

    Args:
        cfg: configuration namespace (uses cfg.num_segments).
        val_loader: DataLoader yielding (input, target) batches.
        model: DataParallel-wrapped network already on GPU.
        criterion: classification loss.
        scheduler: LR scheduler; currently unused (its step() call below is
            commented out), kept for interface compatibility.

    Returns:
        Average top-1 precision (percent) over the validation set.
    """
    global log
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # Inference mode for dropout / batch-norm.
    model.eval()

    end = time.time()
    for i, (inputs, target) in enumerate(val_loader):
        # BUG FIX: `target.cuda(async=True)` is a SyntaxError on Python 3.7+
        # (`async` is a keyword); non_blocking=True is the modern equivalent.
        target = target.cuda(non_blocking=True)

        output = model(inputs)
        # Average the per-segment scores: (batch*k, C) -> (batch, C).
        output = output.view((-1, cfg.num_segments) + output.size()[1:])
        output = torch.mean(output, dim=1)
        loss = criterion(output, target)

        prec1, prec5 = accuracy(output.detach(), target, topk=(1, 5))

        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        batch_time.update(time.time() - end)
        end = time.time()

        if i % PRINT_FREQ == 0:
            log.info((
                'Test: [{0}/{1}]\t'
                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                    i+1, len(val_loader),
                    batch_time=batch_time,
                    loss=losses,
                    top1=top1,
                    top5=top5)))

    # scheduler.step(losses.avg)
    log.info((
        'Testing Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss {loss.avg:.5f}'
        .format(top1=top1, top5=top5, loss=losses)))

    return top1.avg


@static_vars(cnt=0)
def save_checkpoint(cfg, state, is_best, filename):
    """Persist a training checkpoint, and additionally copy it to a
    'model_best' file (plus a numbered history copy) when it is the best
    model seen so far.

    Args:
        cfg: configuration; cfg.model_prefix is a path prefix such as
            '~/out/ylimed' whose last component is the filename prefix.
        state: dict to serialize (epoch, arch, state_dict, best_prec1).
        is_best: whether this checkpoint has the best validation accuracy.
        filename: checkpoint basename, e.g. 'checkpoint.pth.tar'.
    """
    filename = '_'.join((cfg.model_prefix,
                         cfg.representation.lower(), filename))
    # Ensure the output directory exists. os.path.dirname replaces the
    # hand-rolled "'/'.join(split('/')[:-1])", and exist_ok=True avoids the
    # isdir-then-makedirs race.
    out_dir = os.path.dirname(cfg.model_prefix)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    torch.save(state, filename)
    if is_best:
        best_name = '_'.join((cfg.model_prefix,
                              cfg.representation.lower(),
                              'model_best.pth.tar'))
        shutil.copyfile(filename, best_name)
        # Keep a numbered history of every best model; cnt is a "static"
        # attribute attached by the @static_vars decorator.
        shutil.copyfile(filename, best_name + '.' + str(save_checkpoint.cnt))
        save_checkpoint.cnt += 1


class AverageMeter(object):
    """Tracks the most recent value and a running weighted average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val = 0
        self.avg = 0
        self.__total = 0
        self.__count = 0

    def update(self, val, n=1):
        """Fold in `val`, observed `n` times, and refresh the average."""
        self.val = val
        self.__total += n * val
        self.__count += n
        self.avg = self.__total / self.__count


def adjust_learning_rate(cfg, optimizer, epoch):
    """Step-decay the learning rate and write it into every param group.

    The base lr is multiplied by cfg.lr_decay once for every milestone in
    cfg.lr_steps (e.g. [55.0, 110.0, 165.0]) that `epoch` has reached.
    Each group's lr/weight_decay is further scaled by its own
    'lr_mult'/'decay_mult'. Returns the decayed base lr (logging only).
    """
    milestones_passed = sum(epoch >= step for step in cfg.lr_steps)
    lr = cfg.lr * cfg.lr_decay ** milestones_passed
    wd = cfg.weight_decay
    for group in optimizer.param_groups:
        group['lr'] = lr * group['lr_mult']
        group['weight_decay'] = wd * group['decay_mult']
    return lr

def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group.

    The original loop-and-break raised an obscure UnboundLocalError when
    `param_groups` was empty; direct indexing raises a clearer IndexError.
    """
    return optimizer.param_groups[0]['lr']

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of 0-dim float tensors, one per k, each the percentage of
        samples whose true class is among the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # Indices of the maxk highest scores per sample: (batch, maxk).
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()  # (maxk, batch), so row r holds the rank-r predictions

    # Broadcast target to (maxk, batch) and compare element-wise; each
    # column contains at most one True.
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUG FIX: use reshape instead of view — the sliced tensor is not
        # guaranteed contiguous, and .view(-1) raises
        # "view size is not compatible with input tensor's size and stride"
        # on modern PyTorch (the official ImageNet example made this same
        # change). reshape copies only when necessary.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
