# coding: utf-8
"""Run training."""

import shutil
import time
import numpy as np

import torch
import torch.backends.cudnn as cudnn
import torch.nn.parallel
import torchvision

from dataset import CoviarDataSet
from model import Model
from train_options import parser
from transforms import GroupCenterCrop
from transforms import GroupScale
import pdb
# Save a periodic checkpoint every SAVE_FREQ epochs (in addition to "best" saves).
SAVE_FREQ = 40
# Print training/validation progress every PRINT_FREQ iterations.
PRINT_FREQ = 20
# Best top-1 validation accuracy observed so far; updated inside main().
best_prec1 = 0


def main():
    """Parse arguments, build the model and data loaders, and run training.

    Trains for ``args.epochs`` epochs, validating every ``args.eval_freq``
    epochs and checkpointing the best / periodic weights.
    """
    global args
    global best_prec1
    args = parser.parse_args()

    print('Training arguments:')
    # Iteration order follows the order of parser.add_argument() calls.
    for k, v in vars(args).items():
        print('\t{}: {}'.format(k, v))

    # Map dataset name to its number of action classes.
    if args.data_name == 'ucf101':
        num_class = 101
    elif args.data_name == 'hmdb51':
        num_class = 51
    elif args.data_name == 'ylimed':
        num_class = 11
    else:
        raise ValueError('Unknown dataset ' + args.data_name)

    model = Model(num_class, args.num_segments, args.representation,
                  base_model=args.arch)
    # type(model) <class 'model.Model'>

    train_loader = torch.utils.data.DataLoader(
        CoviarDataSet(
            args.data_root,
            args.data_name,
            video_list=args.train_list,
            num_segments=args.num_segments,
            representation=args.representation,
            transform=model.get_augmentation(),
            is_train=True,
            accumulate=(not args.no_accumulation),
            ),
        batch_size=args.batch_size, shuffle=True,  # data reshuffled at every epoch
        num_workers=args.workers, pin_memory=True)  # page-locked (pinned) host memory

    val_loader = torch.utils.data.DataLoader(
        CoviarDataSet(
            args.data_root,
            args.data_name,
            video_list=args.test_list,
            num_segments=args.num_segments,
            representation=args.representation,
            # Deterministic eval-time preprocessing: scale then center-crop.
            transform=torchvision.transforms.Compose([
                GroupScale(int(model.scale_size)),
                GroupCenterCrop(model.crop_size),
                ]),
            is_train=False,
            accumulate=(not args.no_accumulation),
            ),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # Two steps in one:
    #   1. DataParallel replicates the module and splits each input batch
    #      across the given devices (chunking along the batch dimension).
    #   2. .cuda() moves all model parameters and buffers to the GPU.
    model = torch.nn.DataParallel(model, device_ids=args.gpus).cuda()
    # type(model) <class 'torch.nn.parallel.data_parallel.DataParallel'>
    # Let cuDNN benchmark convolution algorithms; this can slightly speed up
    # training when input sizes stay fixed across iterations.
    cudnn.benchmark = True

    # named_parameters() yields (name, Parameter) tuples for every parameter;
    # the names are used below to assign per-group lr / weight-decay multipliers.
    params_dict = dict(model.named_parameters())
    params = []
    for key, value in params_dict.items():
        # Biases are exempt from weight decay.
        decay_mult = 0.0 if 'bias' in key else 1.0

        # For mv/residual inputs, the earliest layers get a reduced lr
        # multiplier; the classifier head ('.fc.') trains at full rate and
        # everything else at 0.01x.
        if ('module.base_model.conv1' in key
                or 'module.base_model.bn1' in key
                or 'data_bn' in key) and args.representation in ['mv', 'residual']:
            lr_mult = 0.1
        elif '.fc.' in key:
            lr_mult = 1.0
        else:
            lr_mult = 0.01

        params += [{'params': value, 'lr': args.lr,
                    'lr_mult': lr_mult, 'decay_mult': decay_mult}]

    optimizer = torch.optim.Adam(
        params,
        weight_decay=args.weight_decay,
        eps=0.001)  # larger eps than the default, for numerical stability
    # Classification criterion.
    criterion = torch.nn.CrossEntropyLoss().cuda()

    for epoch in range(args.epochs):
        # The returned cur_lr is only used for logging; the optimizer's lr
        # has already been adjusted inside the function.
        cur_lr = adjust_learning_rate(optimizer, epoch,
                                      args.lr_steps, args.lr_decay)

        train(train_loader, model, criterion, optimizer, epoch, cur_lr)

        # Evaluate periodically and at the final epoch.
        if epoch % args.eval_freq == 0 or epoch == args.epochs - 1:
            prec1 = validate(val_loader, model, criterion)

            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            # Save when a new best appears, or at the periodic checkpoint.
            if is_best or epoch % SAVE_FREQ == 0:
                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'arch': args.arch,
                        'state_dict': model.state_dict(),
                        'best_prec1': best_prec1,
                    },
                    is_best,
                    filename='checkpoint.pth.tar')


def train(train_loader, model, criterion, optimizer, epoch, cur_lr):
    """Run one training epoch over `train_loader`.

    Args:
        train_loader: DataLoader yielding (input, target) batches; input is
            presumably (batch, num_segments, C, H, W) — confirm against
            CoviarDataSet.
        model: network to train (DataParallel-wrapped, already on GPU).
        criterion: classification loss (CrossEntropyLoss).
        optimizer: optimizer updated once per batch.
        epoch: current epoch index (logging only).
        cur_lr: current base learning rate (logging only).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # Put the module in training mode (enables dropout, batchnorm updates).
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - end)

        # Fix: `async` is a reserved keyword since Python 3.7, so
        # `target.cuda(async=True)` is a SyntaxError; `non_blocking` is the
        # PyTorch >= 0.4 spelling of the same option.
        target = target.cuda(non_blocking=True)

        # Variable wrapping is deprecated since PyTorch 0.4; tensors carry
        # autograd state directly.
        output = model(input)
        # Scores come back as (batch * num_segments, num_classes); reshape to
        # (batch, num_segments, num_classes) and average over the segments.
        output = output.view((-1, args.num_segments) + output.size()[1:])
        output = torch.mean(output, dim=1)

        # `output` now holds per-class scores of shape (batch, num_classes).
        loss = criterion(output, target)

        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        # .item() replaces the removed `loss.data[0]` 0-dim indexing.
        losses.update(loss.item(), input.size(0))
        top1.update(float(prec1[0]), input.size(0))
        top5.update(float(prec5[0]), input.size(0))

        # Gradients accumulate across backward() calls, so clear them before
        # computing this batch's gradients.
        optimizer.zero_grad()
        loss.backward()
        # Single parameter update using the freshly computed gradients.
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()

        if i % PRINT_FREQ == 0:
            print(('Epoch: [{0}][{1}/{2}], lr: {lr:.7f}\t'
                   'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                   'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                   'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                   'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                   'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       epoch, i, len(train_loader),
                       batch_time=batch_time,
                       data_time=data_time,
                       loss=losses,
                       top1=top1,
                       top5=top5,
                       lr=cur_lr)))


# 训练间断验证, 用于选择泛化最好的weights
def validate(val_loader, model, criterion):
    """Evaluate the model on `val_loader` (used to pick the best weights).

    Returns:
        Average top-1 accuracy (percentage) over the validation set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # Inference mode for dropout / batchnorm.
    model.eval()

    end = time.time()
    # torch.no_grad() replaces the removed `volatile=True` Variable flag
    # (PyTorch >= 0.4): no autograd graph is built during evaluation.
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            # Fix: `async` is a reserved keyword since Python 3.7; the
            # PyTorch >= 0.4 spelling is `non_blocking`.
            target = target.cuda(non_blocking=True)

            output = model(input)
            # Average the per-segment scores of each video, as in train().
            output = output.view((-1, args.num_segments) + output.size()[1:])
            output = torch.mean(output, dim=1)
            loss = criterion(output, target)

            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))

            # .item() replaces the removed `loss.data[0]` indexing.
            losses.update(loss.item(), input.size(0))
            top1.update(float(prec1[0]), input.size(0))
            top5.update(float(prec5[0]), input.size(0))

            batch_time.update(time.time() - end)
            end = time.time()

            if i % PRINT_FREQ == 0:
                print(('Test: [{0}/{1}]\t'
                       'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                       'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                       'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                       'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                           i, len(val_loader),
                           batch_time=batch_time,
                           loss=losses,
                           top1=top1,
                           top5=top5)))

    print(('Testing Results: Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Loss {loss.avg:.5f}'
           .format(top1=top1, top5=top5, loss=losses)))

    return top1.avg


def save_checkpoint(state, is_best, filename):
    """Serialize `state` to '<prefix>_<representation>_<filename>'.

    When `is_best` is True, also copy the checkpoint to the matching
    'model_best.pth.tar' file so the best weights are always retrievable.
    """
    prefix = '_'.join((args.model_prefix, args.representation.lower()))
    checkpoint_path = prefix + '_' + filename
    torch.save(state, checkpoint_path)
    if is_best:
        shutil.copyfile(checkpoint_path, prefix + '_' + 'model_best.pth.tar')


class AverageMeter(object):
    """Tracks the most recent value and the running mean of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.__total = 0
        self.__count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.__total += val * n
        self.__count += n
        self.avg = self.__total / self.__count


def adjust_learning_rate(optimizer, epoch, lr_steps, lr_decay):
    """Apply step decay to the optimizer's learning rate.

    The base lr is multiplied by `lr_decay` once for every milestone in
    `lr_steps` (e.g. [55.0, 110.0, 165.0]) that `epoch` has reached. Each
    param group additionally scales the decayed lr and the base weight decay
    by its own 'lr_mult' / 'decay_mult'.

    Returns the decayed (unscaled) learning rate, for logging.
    """
    milestones_passed = (epoch >= np.array(lr_steps)).sum()
    decay = lr_decay ** milestones_passed
    lr = args.lr * decay  # args.lr is the user-specified base lr
    wd = args.weight_decay
    for group in optimizer.param_groups:
        group['lr'] = lr * group['lr_mult']
        group['weight_decay'] = wd * group['decay_mult']
    return lr


def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        A list with one 1-element tensor per k, holding the percentage of
        samples whose true class appears among the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # Indices of the maxk highest-scoring classes per sample: (batch, maxk).
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()  # -> (maxk, batch); row j holds the rank-j predictions

    # correct[j, i] is True when sample i's true label is its rank-j prediction;
    # each column therefore contains at most one True.
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # Fix: use reshape, not view — the slice of the transposed tensor may
        # be non-contiguous, and .view(-1) raises on newer PyTorch versions.
        # keepdim=True keeps a 1-element tensor so callers can index with [0].
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()
