# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import logging
import time
from collections import namedtuple
from pathlib import Path

import torch
import torch.optim as optim
import torch.nn as nn
from naie.context import Context


def create_logger(cfg, cfg_name, phase='train'):
    """Create the run's output directories and a file+console logger.

    :param cfg: experiment config; reads OUTPUT_DIR, LOG_DIR,
        DATASET.DATASET, DATASET.HYBRID_JOINTS_TYPE and MODEL.NAME
    :param cfg_name: path to the config file; its stem names the run
    :param phase: 'train' or 'test' — appended to the log file name
    :return: (logger, final output dir as str, tensorboard log dir as str)
    """
    root_output_dir = Path(cfg.OUTPUT_DIR)
    if not root_output_dir.exists():
        print('=> creating {}'.format(root_output_dir))
    # parents/exist_ok makes this safe on a fresh machine (missing parent
    # dirs) and free of the check-then-create race under concurrent starts.
    root_output_dir.mkdir(parents=True, exist_ok=True)

    # Dataset name gets the hybrid-joints suffix only when one is configured.
    dataset = cfg.DATASET.DATASET + '_' + cfg.DATASET.HYBRID_JOINTS_TYPE \
        if cfg.DATASET.HYBRID_JOINTS_TYPE else cfg.DATASET.DATASET
    dataset = dataset.replace(':', '_')
    model = cfg.MODEL.NAME
    # File stem of the config, e.g. '/a/b/w32_256.yaml' -> 'w32_256'.
    cfg_name = os.path.basename(cfg_name).split('.')[0]

    final_output_dir = root_output_dir / dataset / model / cfg_name

    print('=> creating {}'.format(final_output_dir))
    final_output_dir.mkdir(parents=True, exist_ok=True)

    time_str = time.strftime('%Y-%m-%d-%H-%M')
    log_file = '{}_{}_{}.log'.format(cfg_name, time_str, phase)
    final_log_file = final_output_dir / log_file

    # basicConfig attaches a FileHandler writing '%(asctime)-15s %(message)s'
    # records to the run's log file (no-op if the root logger is configured).
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(filename=str(final_log_file),
                        format=head)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # Mirror log records to the console, but only once: calling
    # create_logger twice must not stack duplicate stream handlers
    # (FileHandler subclasses StreamHandler, hence the exclusion).
    has_console = any(
        isinstance(h, logging.StreamHandler)
        and not isinstance(h, logging.FileHandler)
        for h in logger.handlers
    )
    if not has_console:
        logger.addHandler(logging.StreamHandler())

    tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \
        (cfg_name + '_' + time_str)

    print('=> creating {}'.format(tensorboard_log_dir))
    tensorboard_log_dir.mkdir(parents=True, exist_ok=True)

    return logger, str(final_output_dir), str(tensorboard_log_dir)


def get_optimizer(cfg, model):
    """Build the optimizer named by ``cfg.TRAIN.OPTIMIZER``.

    Supports 'sgd' (with momentum, weight decay and Nesterov flags from
    the config) and 'adam' (learning rate only). Any other name yields
    ``None``.

    :param cfg: config providing the TRAIN.* optimizer hyper-parameters
    :param model: module whose parameters the optimizer will update
    :return: a torch optimizer instance, or None for an unknown name
    """
    opt_name = cfg.TRAIN.OPTIMIZER
    if opt_name == 'sgd':
        return optim.SGD(
            model.parameters(),
            lr=cfg.TRAIN.LR,
            momentum=cfg.TRAIN.MOMENTUM,
            weight_decay=cfg.TRAIN.WD,
            nesterov=cfg.TRAIN.NESTEROV,
        )
    if opt_name == 'adam':
        return optim.Adam(model.parameters(), lr=cfg.TRAIN.LR)
    return None


def save_checkpoint(states, is_best, output_dir,
                    filename='checkpoint.pth'):
    """Persist the training state, mirroring a copy to the cloud cache dir.

    :param states: dict of state to serialize; when ``is_best`` is set,
        its 'best_state_dict' entry is additionally saved as model_best.pth
    :param is_best: whether this checkpoint is the best seen so far
    :param output_dir: directory receiving the primary copies
    :param filename: name of the checkpoint file
    """
    torch.save(states, os.path.join(output_dir, filename))
    # The /cache mirror exists only on the Huawei cloud platform; make it
    # best-effort so local runs do not crash on the missing directory.
    try:
        torch.save(states, os.path.join('/cache', filename))
    except OSError:
        pass
    output_path = Context.get_output_path()
    print(output_path)
    print('保存checkpoint.pth')
    # Guard on the key actually read below: the original tested
    # 'state_dict' but indexed 'best_state_dict', risking a KeyError.
    if is_best and 'best_state_dict' in states:
        torch.save(states['best_state_dict'],
                   os.path.join(output_dir, 'model_best.pth'))
        # Huawei-cloud-only mirror, best-effort as above.
        try:
            torch.save(states['best_state_dict'],
                       os.path.join('/cache', 'model_best.pth'))
        except OSError:
            pass
        print('保存model_best.pth')


def get_model_summary(model, *input_tensors, item_length=26, verbose=False):
    """Run one forward pass and return a text summary of the model.

    Forward hooks record, for every leaf module, its name, input size,
    parameter count, and (for Conv/Linear layers) multiply-add count.
    The model is switched to eval mode as a side effect.

    :param model: the module to summarize
    :param input_tensors: tensors forwarded through the model once
    :param item_length: column width of the per-layer table (verbose only)
    :param verbose: if True, include the per-layer table in the output
    :return: the summary as a single string
    """

    summary = []

    # One record per hooked module invocation.
    ModuleDetails = namedtuple(
        "Layer", ["name", "input_size", "output_size", "num_parameters", "multiply_adds"])
    hooks = []
    # Counts instances per class name so repeated layers get unique names.
    layer_instances = {}

    def add_hooks(module):

        def hook(module, input, output):
            class_name = str(module.__class__.__name__)

            # Bump the per-class counter to build e.g. "Conv2d_3".
            instance_index = 1
            if class_name not in layer_instances:
                layer_instances[class_name] = instance_index
            else:
                instance_index = layer_instances[class_name] + 1
                layer_instances[class_name] = instance_index

            layer_name = class_name + "_" + str(instance_index)

            params = 0

            # Parameters are only tallied for Conv/BatchNorm/Linear layers.
            if class_name.find("Conv") != -1 or class_name.find("BatchNorm") != -1 or \
               class_name.find("Linear") != -1:
                for param_ in module.parameters():
                    params += param_.view(-1).size(0)

            # Multiply-adds: kernel volume x output spatial size for convs,
            # output numel x input features for linear layers.
            flops = "Not Available"
            if class_name.find("Conv") != -1 and hasattr(module, "weight"):
                flops = (
                    torch.prod(
                        torch.LongTensor(list(module.weight.data.size()))) *
                    torch.prod(
                        torch.LongTensor(list(output.size())[2:]))).item()
            elif isinstance(module, nn.Linear):
                flops = (torch.prod(torch.LongTensor(list(output.size()))) \
                         * input[0].size(1)).item()

            # Unwrap list-valued inputs/outputs AFTER the flops computation
            # above — it relies on the original input[0]/output objects.
            if isinstance(input[0], list):
                input = input[0]
            if isinstance(output, list):
                output = output[0]

            summary.append(
                ModuleDetails(
                    name=layer_name,
                    input_size=list(input[0].size()),
                    output_size=[0,0],# NOTE(review): was list(output.size()) — disabled, presumably because some outputs are not tensors; confirm before restoring
                    num_parameters=params,
                    multiply_adds=flops)
            )

        # Hook only leaf-ish modules: containers and the root model itself
        # would double-count their children.
        if not isinstance(module, nn.ModuleList) \
           and not isinstance(module, nn.Sequential) \
           and module != model:
            hooks.append(module.register_forward_hook(hook))

    model.eval()
    model.apply(add_hooks)

    space_len = item_length

    # Single forward pass fires every hook; then detach them all.
    model(*input_tensors)
    for hook in hooks:
        hook.remove()

    details = ''
    if verbose:
        details = "Model Summary" + \
            os.linesep + \
            "Name{}Input Size{}Output Size{}Parameters{}Multiply Adds (Flops){}".format(
                ' ' * (space_len - len("Name")),
                ' ' * (space_len - len("Input Size")),
                ' ' * (space_len - len("Output Size")),
                ' ' * (space_len - len("Parameters")),
                ' ' * (space_len - len("Multiply Adds (Flops)"))) \
                + os.linesep + '-' * space_len * 5 + os.linesep

    # Accumulate totals; per-layer rows are only rendered when verbose.
    params_sum = 0
    flops_sum = 0
    for layer in summary:
        params_sum += layer.num_parameters
        if layer.multiply_adds != "Not Available":
            flops_sum += layer.multiply_adds
        if verbose:
            details += "{}{}{}{}{}{}{}{}{}{}".format(
                layer.name,
                ' ' * (space_len - len(layer.name)),
                layer.input_size,
                ' ' * (space_len - len(str(layer.input_size))),
                layer.output_size,
                ' ' * (space_len - len(str(layer.output_size))),
                layer.num_parameters,
                ' ' * (space_len - len(str(layer.num_parameters))),
                layer.multiply_adds,
                ' ' * (space_len - len(str(layer.multiply_adds)))) \
                + os.linesep + '-' * space_len * 5 + os.linesep

    details += os.linesep \
        + "Total Parameters: {:,}".format(params_sum) \
        + os.linesep + '-' * space_len * 5 + os.linesep
    details += "Total Multiply Adds (For Convolution and Linear Layers only): {:,} GFLOPs".format(flops_sum/(1024**3)) \
        + os.linesep + '-' * space_len * 5 + os.linesep
    details += "Number of Layers" + os.linesep
    for layer in layer_instances:
        details += "{} : {} layers   ".format(layer, layer_instances[layer])

    return details
