# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import time
import logging
import numpy as np
from pathlib import Path
from collections import namedtuple
from PIL import Image

import torch
import torch.optim as optim
import torch.nn as nn
import torchvision

def concat_batch(batchItem):
    """Stack each group of same-shaped tensors in *batchItem*, then
    concatenate all stacked groups along dim 0 into a single tensor."""
    stacked_groups = [torch.stack(group, dim=0) for group in batchItem]
    return torch.cat(stacked_groups)


def connection_data(a, b):
    """Concatenate two same-type containers.

    Args:
        a, b: both ``np.ndarray`` (joined along axis 0) or both ``list``
            (joined with ``+``). Types must match.

    Returns:
        The concatenated container.

    Raises:
        AssertionError: if the two arguments differ in type.
        ValueError: if the type is neither ``np.ndarray`` nor ``list``.
    """
    assert type(a) == type(b)
    if isinstance(a, np.ndarray):
        return np.concatenate([a, b], axis=0)
    elif isinstance(a, list):
        return a + b
    else:
        # BUG FIX: the ValueError was previously constructed but never
        # raised, so unsupported types silently returned None.
        raise ValueError(f'Type {type(a)} data is not supported!')


def merge_dicts(dict_list, mode='norepeat'):
    """Merge a list of dicts into one dict.

    Args:
        dict_list: dicts to merge; later dicts take precedence.
        mode: ``'norepeat'`` overwrites duplicate keys with the later value;
            any other mode concatenates values per key via ``connection_data``
            (every later dict's keys must already exist in the first dict,
            as in the original implementation).

    Returns:
        A new merged dict; ``{}`` when *dict_list* is empty.
    """
    if not dict_list:
        # Robustness: previously an empty input raised IndexError.
        return {}
    # BUG FIX: copy instead of mutating the caller's first dict in place.
    result = dict(dict_list[0])
    if mode == 'norepeat':
        for other in dict_list[1:]:
            result.update(other)
    else:
        for other in dict_list[1:]:
            for key, val in other.items():
                result[key] = connection_data(result[key], val)
    return result

def get_valid_output(outputs, length):
    """Trim padded batch items to their valid lengths and flatten.

    Args:
        outputs: an iterable of per-sample tensors (first axis is padded to a
            common size N).
        length: per-sample valid counts; ``length[i]`` entries of
            ``outputs[i]`` are kept.

    Returns:
        ``torch.cat`` of the valid slices along dim 0, i.e. shape
        ``[sum(length), ...]``.
    """
    # Cleanup: removed the unused `N = max(length)` and the commented-out
    # reshape code; the loop-and-append became a comprehension.
    valid_slices = [item[:valid, ...] for item, valid in zip(outputs, length)]
    return torch.cat(valid_slices, dim=0)

def create_logger(cfg, cfg_name, global_rank, phase='train'):
    """Create the experiment output dir and, on rank 0 only, a file+console
    logger and a tensorboard log dir.

    Args:
        cfg: experiment config; reads OUTPUT_DIR, LOG_DIR, DATASET.DATASET,
            DATASET.HYBRID_JOINTS_TYPE, MODEL.NAME and TEST.USE_GT_BBOX.
        cfg_name: path to the config file; only its basename stem is used.
        global_rank: distributed rank; non-zero ranks create nothing on disk.
        phase: tag embedded in the log file name (default ``'train'``).

    Returns:
        ``(logger, final_output_dir, tensorboard_log_dir)``; *logger* and
        *tensorboard_log_dir* are ``None`` on non-zero ranks.
    """
    root_output_dir = Path(cfg.OUTPUT_DIR)
    dataset = cfg.DATASET.DATASET + '_' + cfg.DATASET.HYBRID_JOINTS_TYPE \
        if cfg.DATASET.HYBRID_JOINTS_TYPE else cfg.DATASET.DATASET
    dataset = dataset.replace(':', '_')
    model = cfg.MODEL.NAME
    # BUG FIX: strip directory and extension on EVERY rank. Previously only
    # rank 0 did this, so other ranks computed a different final_output_dir
    # (e.g. 'out/ds/net/experiments/cfg.yaml' vs 'out/ds/net/cfg').
    cfg_name = os.path.basename(cfg_name).split('.')[0]
    final_output_dir = root_output_dir / dataset / model / cfg_name

    if global_rank != 0:
        # Non-zero ranks only need the path; no dirs or handlers are created.
        return None, str(final_output_dir), None

    if not root_output_dir.exists():
        print('=> creating {}'.format(root_output_dir))
        root_output_dir.mkdir()

    print('=> creating {}'.format(final_output_dir))
    final_output_dir.mkdir(parents=True, exist_ok=True)

    time_str = time.strftime('%Y-%m-%d-%H-%M')
    log_file = 'GT_{}_{}_{}_{}.log'.format(cfg.TEST.USE_GT_BBOX, phase, cfg_name, time_str)
    final_log_file = final_output_dir / log_file
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(filename=str(final_log_file),
                        format=head)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Mirror log records to the console in addition to the file.
    console = logging.StreamHandler()
    logging.getLogger('').addHandler(console)

    tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \
        (cfg_name + '_' + time_str)

    print('=> creating {}'.format(tensorboard_log_dir))
    tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
    return logger, str(final_output_dir), str(tensorboard_log_dir)


def get_optimizer(cfg, parameters):
    """Build the optimizer selected by ``cfg.TRAIN.OPTIMIZER``.

    Args:
        cfg: config; reads TRAIN.OPTIMIZER, TRAIN.LR and, for 'sgd',
            TRAIN.MOMENTUM, TRAIN.WD, TRAIN.NESTEROV.
        parameters: iterable of parameters (or param groups) to optimize.

    Returns:
        A configured ``torch.optim`` optimizer.

    Raises:
        ValueError: for an unrecognized optimizer name. (Previously the
        function silently returned None, which crashed later, far from the
        misconfiguration.)
    """
    name = cfg.TRAIN.OPTIMIZER
    if name == 'sgd':
        return optim.SGD(
            parameters,
            lr=cfg.TRAIN.LR,
            momentum=cfg.TRAIN.MOMENTUM,
            weight_decay=cfg.TRAIN.WD,
            nesterov=cfg.TRAIN.NESTEROV
        )
    if name == 'adam':
        return optim.Adam(
            parameters,
            lr=cfg.TRAIN.LR
        )
    if name == 'adamw':
        return optim.AdamW(
            parameters,
            lr=cfg.TRAIN.LR,
            betas=(0.9, 0.999),
            weight_decay=0.01,
        )
    raise ValueError('Unsupported optimizer: {}'.format(name))


def save_checkpoint(states, is_best, output_dir,
                    filename='checkpoint.pth'):
    """Save a training checkpoint.

    Writes *states* to ``output_dir/filename``, refreshes the rolling
    ``checkpoint.pth``, and, when *is_best*, also writes
    ``states['best_state_dict']`` to ``model_best.pth``.
    """
    torch.save(states, os.path.join(output_dir, filename))
    if filename != 'checkpoint.pth':
        # Keep the rolling 'latest' copy; skip the redundant duplicate write
        # when the caller already used the default name.
        torch.save(states, os.path.join(output_dir, 'checkpoint.pth'))

    # BUG FIX: the guard previously checked 'state_dict' but then read
    # states['best_state_dict'], raising KeyError whenever only
    # 'state_dict' was present.
    if is_best and 'best_state_dict' in states:
        torch.save(states['best_state_dict'],
                   os.path.join(output_dir, 'model_best.pth'))


def _to_list_of_tensor(x, dtype=None, device=None):
    return [torch.as_tensor(item, dtype=dtype, device=device) for item in x]

def _to_tuple_of_tensor(x, dtype=None, device=None):
    return tuple(torch.as_tensor(item, dtype=dtype, device=device) for item in x)

def _to_dict_of_tensor(x, dtype=None, device=None):
    return dict([(k, torch.as_tensor(v, dtype=dtype, device=device)) for k, v in x.items()])

def to_tensor(x, dtype=None, device=None):
    """Convert input data to tensor based on its format"""
    # Tensors and PIL images are handled directly; check them first so that
    # container dispatch below only sees plain Python collections.
    if isinstance(x, torch.Tensor):
        return torch.as_tensor(x, dtype=dtype, device=device)
    if isinstance(x, Image.Image):
        return torchvision.transforms.functional.to_tensor(x).to(
            dtype=dtype, device=device)
    # Container types map onto their dedicated converters; the order
    # (list before tuple before dict) mirrors the original isinstance chain.
    container_converters = {
        list: _to_list_of_tensor,
        tuple: _to_tuple_of_tensor,
        dict: _to_dict_of_tensor,
    }
    for container_type, convert in container_converters.items():
        if isinstance(x, container_type):
            return convert(x, dtype=dtype, device=device)
    raise ValueError("Unsupported format {}".format(type(x)))