import os
import time
import logging
import numpy as np
from pylsy import pylsytable

import torch

from core.evaluate import accuracy
from core.inference import get_final_preds
from utils.transforms import flip_back
from utils.vis import save_debug_images
from utils.utils import concat_batch
from utils import ddp_opx


logger = logging.getLogger(__name__)


class Trainer():
    """Training / validation driver for COCO and V-COCO pose estimation.

    Dispatches on ``cfg.DATASET.DATASET`` ('coco' or 'vcoco') to the
    dataset-specific train / validate loop.
    """

    def __init__(self, args, cfg):
        # args: command-line namespace; cfg: experiment configuration node.
        self.args = args
        self.cfg = cfg

    def train_one_epoch(self, train_loader, model, criterion, optimizer, epoch, output_dir=None, writer_dict=None):
        """Run one training epoch, dispatching on the configured dataset."""
        if self.cfg.DATASET.DATASET == 'coco':
            self.train_coco(train_loader, model, criterion, optimizer, epoch, output_dir, writer_dict)
        elif self.cfg.DATASET.DATASET == 'vcoco':
            self.train_vcoco(train_loader, model, criterion, optimizer, epoch, output_dir, writer_dict)

    def train_coco(self, train_loader, model, criterion, optimizer, epoch, output_dir, writer_dict):
        """One training epoch on COCO.

        Optimizes the 'multi' (inter-person) heatmap loss and, when the model
        also emits a 'single' (intra-person) branch, a weighted sum of both
        using ``cfg.MODEL.LOSS_WEIGHTS``.
        """
        batch_time = AverageMeter()
        data_time = AverageMeter()
        acc = AverageMeter()
        losses = AverageMeter()
        losses_intra = AverageMeter()
        losses_inter = AverageMeter()

        # switch to train mode
        model.train()
        loss_weights = self.cfg.MODEL.LOSS_WEIGHTS

        end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(train_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = relocate_to_cuda(input, non_blocking=True)
            target = relocate_to_cuda(target, non_blocking=True)  # [sum(length), 17, h, w]
            target_weight = relocate_to_cuda(target_weight, non_blocking=True)

            # 'length': number of person crops per image in this batch
            outputs = model({
                'x': input,
                'length': meta['length'].cpu().numpy().tolist()
            })     # [bs*N, 17, h, w]

            output = outputs['multi']

            loss = criterion(outputs['multi'], target, target_weight)
            losses_inter.update(loss.item(), target.size(0))
            if 'single' in outputs and outputs['single'] is not None:
                loss_single = criterion(outputs['single'], target, target_weight)
                losses_intra.update(loss_single.item(), target.size(0))
                # total = w0 * single(intra) + w1 * multi(inter)
                loss = loss_weights[0]*loss_single + loss_weights[1]*loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure accuracy and record loss
            losses.update(loss.item(), target.size(0))

            _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(),
                                            target.detach().cpu().numpy())
            acc.update(avg_acc, cnt)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if ddp_opx.is_main_process() and i % self.cfg.PRINT_FREQ == 0:
                if self.cfg.DEBUG.DEBUG:
                    prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i)
                    # pred*4: presumably heatmap-to-input rescale (stride 4) — TODO confirm
                    save_debug_images(self.cfg, input, meta, target, pred*4, output, prefix)

                msg_dict = {
                    'epoch': epoch,
                    'now': i,
                    'length': len(train_loader),
                    'batch_time': batch_time,
                    'speed': target.size(0)/batch_time.val,
                    'data_time': data_time,
                    'acc': acc,
                    "losses_intra": losses_intra,
                    "losses_inter": losses_inter,
                    'losses': losses
                }
                self.log_records('train', writer_dict, **msg_dict)

    def train_vcoco(self, train_loader, model, criterion, optimizer, epoch, output_dir, writer_dict):
        """One training epoch on V-COCO.

        NOTE: only the pose-HOI branch loss is currently optimized; the
        multi / single / interaction loss terms were disabled experimentally.
        """
        batch_time = AverageMeter()
        data_time = AverageMeter()
        acc = AverageMeter()
        losses = AverageMeter()
        losses_peho = AverageMeter()

        # switch to train mode
        model.train()

        end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(train_loader):
            input = relocate_to_cuda(input, non_blocking=True)
            target = relocate_to_cuda(target, non_blocking=True)  # [sum(length), 17, h, w]
            target_weight = relocate_to_cuda(target_weight, non_blocking=True)
            meta = relocate_to_cuda(meta, non_blocking=True)

            # measure data loading time
            data_time.update(time.time() - end)

            samples = {
                'x': input,
                'length': meta['length'],
                'hoi': meta['hoi'],
            }
            # interaction_loss is returned by the model but not optimized here
            outputs, interaction_loss = model(samples)     # [bs*N, 17, h, w]

            output = outputs['multi']

            if len(outputs['pose_hoi']) == 0:
                # No pose-HOI predictions for this batch: nothing to optimize.
                # BUG FIX: the previous code set the loss to the int 0 and then
                # crashed on .item()/.backward().
                end = time.time()
                continue
            loss_pesehoi = criterion(outputs['pose_hoi'], target, target_weight)
            losses_peho.update(loss_pesehoi.item(), target.size(0))

            loss = loss_pesehoi

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # measure accuracy and record loss
            losses.update(loss.item(), target.size(0))

            _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(),
                                            target.detach().cpu().numpy())
            acc.update(avg_acc, cnt)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if ddp_opx.is_main_process() and i % self.cfg.PRINT_FREQ == 0:
                if self.cfg.DEBUG.DEBUG:
                    # debug-image dumping is disabled for vcoco (inputs are
                    # per-image lists rather than a plain batch tensor)
                    prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i)

                msg_dict = {
                    'epoch': epoch,
                    'now': i,
                    'length': len(train_loader),
                    'batch_time': batch_time,
                    'speed': target.size(0)/batch_time.val,
                    'data_time': data_time,
                    'acc': acc,
                    'losses_peho': losses_peho,
                    'losses': losses,
                }
                self.log_records('train', writer_dict, **msg_dict)

    def validate(self, val_loader, val_dataset, model, output_dir):
        """Run validation, dispatching on the configured dataset."""
        if self.cfg.DATASET.DATASET == 'coco':
            return self.validate_coco(val_loader, val_dataset, model, output_dir)
        elif self.cfg.DATASET.DATASET == 'vcoco':
            return self.validate_vcoco(val_loader, val_dataset, model, output_dir)

    @torch.no_grad()
    def validate_coco(self, val_loader, val_dataset, model, output_dir):
        """Evaluate on COCO.

        Returns a dict with 'all_preds' [N, J, 3], 'all_boxes' [N, 6] and
        'image_path' (one entry per person crop).
        """
        batch_time = AverageMeter()
        acc = AverageMeter()

        # switch to evaluate mode
        model.eval()

        # Accumulators are seeded with one dummy row; the first batch
        # overwrites it and the final [:idx] slice trims the surplus.
        all_preds = np.zeros(
            (1, self.cfg.MODEL.NUM_JOINTS, 3),
            dtype=np.float32
        )
        all_boxes = np.zeros((1, 6))
        image_path = []
        idx = 0
        end = time.time()
        for i, (input, target, target_weight, meta) in enumerate(val_loader):
            input = relocate_to_cuda(input, non_blocking=True)
            target = relocate_to_cuda(target, non_blocking=True)  # [sum(length), 17, h, w]
            target_weight = relocate_to_cuda(target_weight, non_blocking=True)

            # compute output
            outputs = model({
                'x': input,
                'length': meta['length'].cpu().numpy().tolist()
            })

            output = outputs['multi']
            length = meta['length'].numpy().tolist()

            if self.cfg.TEST.FLIP_TEST:
                # this part is ugly, because pytorch has not supported negative index
                # input_flipped = model(input[:, :, :, ::-1])
                input_flipped = np.flip(input.cpu().numpy(), 3).copy()

                input_flipped = torch.from_numpy(input_flipped).cuda()

                outputs_flipped = model({
                    'x': input_flipped,
                    'length': meta['length'].cpu().numpy().tolist()
                })

                # BUG FIX: inspect the flipped outputs (previously tested
                # `outputs`, which merely happened to have the same type)
                if isinstance(outputs_flipped, dict):
                    output_flipped = outputs_flipped['multi']
                else:
                    output_flipped = outputs_flipped

                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                        val_dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()

                # average the straight and flipped predictions
                output = (output + output_flipped) * 0.5

            num_images = target.size(0)
            # measure accuracy and record loss
            _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                            target.cpu().numpy())

            acc.update(avg_acc, cnt)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if self.cfg.TEST.USE_GT_BBOX:
                c = meta['center'].cpu().numpy()
                s = meta['scale'].cpu().numpy()
                score = meta['score'].cpu().numpy()
            else:
                c = meta['center'][0].numpy()
                s = meta['scale'][0].numpy()
                score = meta['score'][0].numpy()

            preds, maxvals = get_final_preds(
                self.cfg, output.clone().cpu().numpy(), c, s)

            # grow the accumulators by this batch, then fill the new rows
            extend_pred = np.zeros((num_images, self.cfg.MODEL.NUM_JOINTS, 3), dtype=np.float32)
            extend_boxes = np.zeros((num_images, 6), dtype=np.float32)
            all_preds = np.concatenate((all_preds, extend_pred), axis=0)
            all_boxes = np.concatenate((all_boxes, extend_boxes), axis=0)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals
            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            # scale*200 is the pixel box size convention of this codebase — TODO confirm
            all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)
            all_boxes[idx:idx + num_images, 5] = score

            # repeat each image path once per person crop it contributed
            for num, img in zip(length, meta['image']):
                image_path.extend([img for _ in range(num)])

            idx += num_images

            if ddp_opx.is_main_process() and i % self.cfg.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                    'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                        i, len(val_loader), batch_time=batch_time,
                        acc=acc)
                logger.info(msg)

                if self.cfg.DEBUG.DEBUG:
                    prefix = '{}_{}'.format(
                        os.path.join(output_dir, 'val'), i
                    )
                    save_debug_images(self.cfg, input, meta, target, pred*4, output, prefix)

        # drop the surplus trailing rows left over from the seed row
        all_preds = all_preds[:idx,...]
        all_boxes = all_boxes[:idx,...]

        assert all_preds.shape[0] == all_boxes.shape[0] == len(image_path)

        return {
            'all_preds': all_preds,
            'all_boxes': all_boxes,
            'image_path': image_path
        }

    @torch.no_grad()
    def validate_vcoco(self, val_loader, val_dataset, model, output_dir):
        """Evaluate on V-COCO using the pose-HOI branch outputs.

        Returns the same dict layout as :meth:`validate_coco`.
        """
        batch_time = AverageMeter()
        acc = AverageMeter()

        # switch to evaluate mode
        model.eval()

        # Accumulators are seeded with one dummy row; the first batch
        # overwrites it and the final [:idx] slice trims the surplus.
        all_preds = np.zeros(
            (1, self.cfg.MODEL.NUM_JOINTS, 3),
            dtype=np.float32
        )
        all_boxes = np.zeros((1, 6))
        image_path = []
        idx = 0
        end = time.time()
        for i, (input_list, target, target_weight, meta) in enumerate(val_loader):
            input_list = relocate_to_cuda(input_list, non_blocking=True)
            target = relocate_to_cuda(target, non_blocking=True)  # [sum(length), 17, h, w]
            target_weight = relocate_to_cuda(target_weight, non_blocking=True)
            meta = relocate_to_cuda(meta, non_blocking=True)

            # compute output
            samples = {
                'x': input_list,
                'length': meta['length'],
                'hoi': meta['hoi']
            }
            outputs, _ = model(samples)     # [bs*N, 17, h, w]

            output = outputs['pose_hoi']

            if self.cfg.TEST.FLIP_TEST:
                # Horizontally flip both the main inputs and the per-sample
                # HOI crops, run the model again and average with the
                # un-flipped predictions.
                # NOTE(review): the hoi_item['input'] tensors inside `meta`
                # are flipped in place, so `meta['hoi']` is mutated here.
                flip_hoi = meta['hoi']
                for hoi_item in flip_hoi:
                    if hoi_item is None:
                        continue
                    hoi_item['input'] = torch.from_numpy(np.flip(hoi_item['input'].unsqueeze(0).cpu().numpy(), 3).copy()).cuda()
                    hoi_item['input'] = hoi_item['input'].squeeze(0)
                input_flipped = np.flip(input_list.cpu().numpy(), 3).copy()
                input_flipped = torch.from_numpy(input_flipped).cuda()
                outputs_flipped, _ = model({
                    'x': input_flipped,
                    'length': meta['length'],
                    'hoi': flip_hoi
                })

                output_flipped = outputs_flipped['pose_hoi']

                output_flipped = flip_back(output_flipped.cpu().numpy(),
                                        val_dataset.flip_pairs)
                output_flipped = torch.from_numpy(output_flipped.copy()).cuda()

                output = (output + output_flipped) * 0.5

            num_images = target.size(0)
            # measure accuracy and record loss
            _, avg_acc, cnt, pred = accuracy(output.cpu().numpy(),
                                            target.cpu().numpy())

            acc.update(avg_acc, cnt)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if self.cfg.TEST.USE_GT_BBOX:
                c = meta['center'].cpu().numpy()
                s = meta['scale'].cpu().numpy()
                score = meta['score'].cpu().numpy()
            else:
                c = meta['center'][0].numpy()
                s = meta['scale'][0].numpy()
                score = meta['score'][0].numpy()

            preds, maxvals = get_final_preds(
                self.cfg, output.clone().cpu().numpy(), c, s)

            # grow the accumulators by this batch, then fill the new rows
            extend_pred = np.zeros((num_images, self.cfg.MODEL.NUM_JOINTS, 3), dtype=np.float32)
            extend_boxes = np.zeros((num_images, 6), dtype=np.float32)
            all_preds = np.concatenate((all_preds, extend_pred), axis=0)
            all_boxes = np.concatenate((all_boxes, extend_boxes), axis=0)

            all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
            all_preds[idx:idx + num_images, :, 2:3] = maxvals
            # double check this all_boxes parts
            all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
            all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
            # scale*200 is the pixel box size convention of this codebase — TODO confirm
            all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)
            all_boxes[idx:idx + num_images, 5] = score

            # repeat each image path once per person it contributed
            for human_num, img in zip(meta['length'], meta['image']):
                num = len(human_num)
                image_path.extend([img for _ in range(num)])

            idx += num_images

            if ddp_opx.is_main_process() and i % self.cfg.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
                    'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                        i, len(val_loader), batch_time=batch_time,
                        acc=acc)
                logger.info(msg)

                if self.cfg.DEBUG.DEBUG:
                    # debug-image dumping is disabled for vcoco
                    prefix = '{}_{}'.format(
                        os.path.join(output_dir, 'val'), i
                    )

        # drop the surplus trailing rows left over from the seed row
        all_preds = all_preds[:idx,...]
        all_boxes = all_boxes[:idx,...]

        assert all_preds.shape[0] == all_boxes.shape[0] == len(image_path)

        return {
            'all_preds': all_preds,
            'all_boxes': all_boxes,
            'image_path': image_path
        }

    def merge_dicts(self, dict_list):
        """Merge a list of result dicts key-wise.

        ndarray values are concatenated along axis 0; list values are
        appended. The input dicts are not modified.

        Returns an empty dict for an empty input list.

        Raises:
            ValueError: if a value is neither an ndarray nor a list.
        """
        def merge_each(a, b):
            assert type(a) == type(b)
            if isinstance(a, np.ndarray):
                return np.concatenate([a, b], axis=0)
            elif isinstance(a, list):
                return a + b
            else:
                # BUG FIX: the exception was constructed but never raised,
                # silently merging unsupported types into None
                raise ValueError(f'Type {type(a)} data is not supported!')

        if not dict_list:
            return {}
        # shallow copy so the caller's first dict is not mutated in place
        result = dict(dict_list[0])
        for dict_item in dict_list[1:]:
            for key, val in dict_item.items():
                result[key] = merge_each(result[key], val)
        return result

    def evaluate_kpts(self, data, val_dataset, output_dir, writer_dict=None):
        """Score accumulated keypoint predictions with the dataset evaluator.

        Logs the metric table(s) and, if writer_dict is given, records them to
        TensorBoard. Returns the dataset's scalar performance indicator.
        """
        all_preds, all_boxes, image_path = data['all_preds'], data['all_boxes'], data['image_path']
        name_values, perf_indicator = val_dataset.evaluate(
            self.cfg, all_preds, output_dir, all_boxes, image_path,
        )

        model_name = self.cfg.MODEL.NAME
        if isinstance(name_values, list):
            for name_value in name_values:
                _print_name_value(name_value, model_name)
        else:
            _print_name_value(name_values, model_name)

        if writer_dict:
            self.log_records('valid', writer_dict, name_values=name_values)

        return perf_indicator

    def log_records(self, mode, writer_dict, **kwargs):
        """Log metrics to the console and TensorBoard.

        mode == 'train' expects epoch/now/length/batch_time/speed/data_time/
        acc plus any number of AverageMeters whose key contains 'loss'; any
        other mode expects only 'name_values' (evaluation metrics dict).
        """
        writer = writer_dict['writer']
        if mode == 'train':
            msg = 'Epoch: [{0}][{1}/{2}]\t' \
                'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
                'Speed {speed:.1f} samples/s\t' \
                'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
                'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
                'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
                    kwargs['epoch'], kwargs['now'], kwargs['length'],
                    batch_time=kwargs['batch_time'], speed=kwargs['speed'],
                    data_time=kwargs['data_time'], loss=kwargs['losses'], acc=kwargs['acc'])
            logger.info(msg)

            global_steps = writer_dict['train_global_steps']
            writer.add_scalar('train/acc', kwargs['acc'].val, global_steps)
            # every kwarg whose key mentions 'loss' is assumed to be an AverageMeter
            for key, item in kwargs.items():
                if 'loss' in key:
                    writer.add_scalar(f'train/{key}', item.val, global_steps)

            writer_dict['train_global_steps'] = global_steps + 1
        else:
            global_steps = writer_dict['valid_global_steps']
            writer.add_scalars(
                'valid/metric',
                dict(kwargs['name_values']),
                global_steps
            )
            writer_dict['valid_global_steps'] = global_steps + 1


class AverageMeter(object):
    """Tracks the most recent value and a running sum/count/average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear every statistic back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count if self.count else 0


# markdown format output
def _print_name_value(name_value, full_arch_name):
    """Log a metric-name -> value mapping as a pylsy table for *full_arch_name*."""
    names = list(name_value.keys())
    table = pylsytable(names)
    logger.info('\nArch: ' + full_arch_name)
    for name in names:
        table.add_data(str(name), round(name_value[name], 3))
    logger.info(table)


from torch import Tensor
from typing import Optional, Union, List, Tuple, Dict, TypeVar
# Generic alias covering a tensor or any (nested) container of tensors.
GenericTensor = TypeVar('GenericTensor', Tensor, List[Tensor], Tuple[Tensor, ...], Dict[str, Tensor])

def relocate_to_device(
        x: GenericTensor, ignore: bool = False,
        device: Optional[Union[torch.device, str, int]] = None,
        **kwargs
    ) -> GenericTensor:
    """
    Relocate data to specified device recursively

    Parameters:
    -----------
    x: Tensor, List[Tensor], Tuple[Tensor] or Dict[Tensor]
        Generic tensor data to be relocated
    device: torch.device, str or int
        Destination device
    ignore: bool
        If True, pass unsupported data types through unchanged instead of
        raising.
    kwargs: dict
        Refer to torch.Tensor.to() for keyworded arguments

    Returns:
    --------
    Tensor, List[Tensor], Tuple[Tensor] or Dict[Tensor]
        Relocated tensor data

    Raises:
    -------
    TypeError
        If ``x`` contains an unsupported type and ``ignore`` is False.
    """
    if isinstance(x, torch.Tensor):
        return x.to(device, **kwargs)
    elif x is None:
        return x
    elif isinstance(x, list):
        return [relocate_to_device(item, ignore, device, **kwargs) for item in x]
    elif isinstance(x, tuple):
        return tuple(relocate_to_device(item, ignore, device, **kwargs) for item in x)
    elif isinstance(x, dict):
        # relocate values in place and return the same dict object
        for key in x:
            x[key] = relocate_to_device(x[key], ignore, device, **kwargs)
        return x
    elif ignore:
        # BUG FIX: this path previously fell off the end of the function and
        # returned None, silently dropping the data; pass it through instead.
        return x
    else:
        raise TypeError('Unsupported type of data {}'.format(type(x)))


def relocate_to_cuda(
        x: GenericTensor, ignore: bool = False,
        device: Optional[Union[torch.device, int]] = None,
        **kwargs
    ) -> GenericTensor:
    """
    Relocate data to CUDA recursively

    NOTE: unlike relocate_to_device, unsupported leaf types are always passed
    through unchanged, regardless of ``ignore``. Callers in this file rely on
    this: dataloader ``meta`` dicts (containing image-path strings, etc.) are
    passed through this function wholesale.

    Parameters:
    -----------
    x: Tensor, List[Tensor], Tuple[Tensor] or Dict[Tensor]
        Generic tensor data to be relocated
    ignore: bool
        Kept for signature compatibility; unsupported types are passed
        through either way (see NOTE above).
    device: torch.device or int
        Destination device
    kwargs: dict
        Refer to torch.Tensor.cuda() for keyworded arguments

    Returns:
    --------
    Tensor, List[Tensor], Tuple[Tensor] or Dict[Tensor]
        Relocated tensor data
    """
    if isinstance(x, torch.Tensor):
        return x.cuda(device, **kwargs)
    elif x is None:
        return x
    elif isinstance(x, list):
        return [relocate_to_cuda(item, ignore, device, **kwargs) for item in x]
    elif isinstance(x, tuple):
        return tuple(relocate_to_cuda(item, ignore, device, **kwargs) for item in x)
    elif isinstance(x, dict):
        # relocate values in place and return the same dict object
        for key in x:
            x[key] = relocate_to_cuda(x[key], ignore, device, **kwargs)
        return x
    else:
        # BUG FIX: the original had an unreachable `raise` after `return x`,
        # and when ignore=True it fell off the end returning None. Non-tensor
        # leaves are now returned unchanged in both cases.
        return x


def relocate_to_cpu(x: GenericTensor, ignore: bool = False) -> GenericTensor:
    """Relocate data to cpu recursively.

    If ``ignore`` is True, unsupported leaf types are passed through
    unchanged; otherwise a TypeError is raised for them.
    """
    if isinstance(x, Tensor):
        return x.cpu()
    elif x is None:
        return x
    elif isinstance(x, list):
        return [relocate_to_cpu(item, ignore=ignore) for item in x]
    elif isinstance(x, tuple):
        return tuple(relocate_to_cpu(item, ignore=ignore) for item in x)
    elif isinstance(x, dict):
        # relocate values in place and return the same dict object
        for key in x:
            x[key] = relocate_to_cpu(x[key], ignore=ignore)
        return x
    elif ignore:
        # BUG FIX: this path previously fell off the end of the function and
        # returned None, silently dropping the data; pass it through instead.
        return x
    else:
        raise TypeError('Unsupported type of data {}'.format(type(x)))
