# ------------------------------------------------------------------------------
# Copyright (c) HQU
# Licensed under the HQU License.
# Written by Wang Youjie (youjieWang@stu.hqu.edu.cn)
# Modified by Wang Youjie (youjieWang@stu.hqu.edu.cn)
# ------------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import pprint
import shutil

import numpy as np
import random

import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter

from lib.config import cfg
from lib.config import update_config
from lib.core.function import train, validate
from lib.utils.utils import create_logger
from lib.core.loss import JointsMSELoss, JointsCoordMSELoss
from lib.utils.utils import get_optimizer
from lib.utils.utils import save_checkpoint

import dataset
import models


def parse_args():
    """Parse command-line options for the training script.

    Returns:
        argparse.Namespace with the parsed options. ``--cfg`` is the only
        required flag; any trailing tokens are collected into ``opts`` and
        later merged into the global config by ``update_config``.
    """
    parser = argparse.ArgumentParser(description='Train keypoints network')

    # general: path to the experiment YAML configuration file (required).
    parser.add_argument('--cfg',
                        required=True,
                        type=str,
                        help='experiment configure file name')

    # Everything after the recognized flags is captured verbatim so that
    # individual config options can be overridden from the command line.
    parser.add_argument('opts',
                        nargs=argparse.REMAINDER,
                        default=None,
                        help="Modify config options using the command-line")

    # Optional directory overrides; all default to the empty string.
    for flag, description in (('--modelDir', 'model directory'),
                              ('--logDir', 'log directory'),
                              ('--dataDir', 'data directory'),
                              ('--prevModelDir', 'prev Model directory')):
        parser.add_argument(flag,
                            help=description,
                            type=str,
                            default='')

    return parser.parse_args()


def main():
    """Train a keypoint network end to end.

    Parses the command line, builds the model / data loaders / losses /
    optimizer from ``cfg``, optionally resumes from a checkpoint in the
    output directory, runs the train-validate loop with per-epoch
    checkpointing, and finally saves ``final_state.pth`` once training
    completes.
    """
    args = parse_args()
    # Merge command-line overrides (cfg file, opts, directory flags) into cfg.
    update_config(cfg, args)

    # Logger plus the output / TensorBoard directories derived from the cfg.
    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'train')

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related settings
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    # Fix every RNG seed so repeated runs produce the same random sequences.
    seed = 22
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    # cfg.MODEL.NAME names a module under lib/models; this resolves to
    # models.<name>.get_pose_net(cfg, is_train=True).
    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
        cfg, is_train=True
    )

    # Copy the model definition file next to the outputs for provenance.
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'),
        final_output_dir)

    # TensorBoard writer plus the global step counters that train()/validate()
    # advance; the counters are checkpointed so resumed runs keep their steps.
    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # Two criteria: heatmap MSE first, coordinate MSE second —
    # train()/validate() rely on this ordering.
    criterions = [
        JointsMSELoss(use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT).cuda(),
        JointsCoordMSELoss().cuda(),
    ]

    # Data loading code (ImageNet normalization statistics).
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    # cfg.DATASET.DATASET names a module under dataset/, e.g. dataset.coco.
    train_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )
    valid_dataset = eval('dataset.' + cfg.DATASET.DATASET)(
        cfg, cfg.DATASET.ROOT, cfg.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    )

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=cfg.TRAIN.SHUFFLE,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY
    )
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=cfg.TEST.BATCH_SIZE_PER_GPU * len(cfg.GPUS),
        shuffle=False,
        num_workers=cfg.WORKERS,
        pin_memory=cfg.PIN_MEMORY
    )

    best_perf = 0.0
    best_model = False
    last_epoch = -1

    optimizer = get_optimizer(cfg, model)

    begin_epoch = cfg.TRAIN.BEGIN_EPOCH
    checkpoint_file = os.path.join(
        final_output_dir, 'checkpoint.pth'
    )

    # Resume the full training state (epoch, best perf, TB steps, weights,
    # optimizer) when a checkpoint already exists in the output directory.
    if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):
        logger.info("=> loading checkpoint '{}'".format(checkpoint_file))
        checkpoint = torch.load(checkpoint_file)
        begin_epoch = checkpoint['epoch']
        best_perf = checkpoint['perf']
        last_epoch = checkpoint['epoch']

        writer_dict['train_global_steps'] = checkpoint['train_global_steps']
        writer_dict['valid_global_steps'] = checkpoint['valid_global_steps']

        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger.info("=> loaded checkpoint '{}' (epoch {})".format(
            checkpoint_file, checkpoint['epoch']))

    # Cosine annealing down to LR_END over END_EPOCH epochs; passing
    # last_epoch keeps the schedule aligned after a resume.
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, cfg.TRAIN.END_EPOCH, eta_min=cfg.TRAIN.LR_END,
        last_epoch=last_epoch)

    for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):

        logger.info("=> current learning rate is {:.6f}".format(
            lr_scheduler.get_last_lr()[0]))

        # Train for one epoch, then evaluate on the validation set.
        train(cfg, train_loader, model, criterions, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)

        perf_indicator = validate(
            cfg, valid_loader, valid_dataset, model, criterions,
            final_output_dir, tb_log_dir, writer_dict
        )

        lr_scheduler.step()

        # Track whether this epoch produced the best validation score so far.
        best_model = perf_indicator >= best_perf
        if best_model:
            best_perf = perf_indicator

        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint({
            'epoch': epoch + 1,
            'model': cfg.MODEL.NAME,
            'state_dict': model.state_dict(),
            'best_state_dict': model.module.state_dict(),
            'perf': perf_indicator,
            'optimizer': optimizer.state_dict(),
            'train_global_steps': writer_dict['train_global_steps'],
            'valid_global_steps': writer_dict['valid_global_steps'],
        }, best_model, final_output_dir)

    # BUG FIX: the final-state save and writer.close() previously sat inside
    # the epoch loop, which closed the TensorBoard writer after the first
    # epoch (breaking all later logging) and rewrote final_state.pth every
    # epoch. They now run exactly once, after training finishes.
    final_model_state_file = os.path.join(
        final_output_dir, 'final_state.pth'
    )
    logger.info('=> saving final model state to {}'.format(
        final_model_state_file)
    )
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()


# Run training only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
