import os
import time
import pickle
import logging
import numpy as np

import torch
from utils import ddp_opx
from collections import defaultdict

logger = logging.getLogger(__name__)

class CacheTemplate(defaultdict):
    """A template for VCOCO cached results.

    Known entries are set from keyword arguments; any key that was never
    detected resolves to a zero-confidence default via ``__missing__``.
    """

    def __init__(self, **kwargs):
        super().__init__()
        # Seed the cache with whatever detections were provided.
        self.update(kwargs)

    def __missing__(self, k):
        # Missing '<action>_agent' keys get a zero score.
        if k.split('_')[-1] == 'agent':
            return 0.
        # Missing <action, role> pairs get a zero score and a tiny box.
        return [0., 0., .1, .1, 0.]

class Trainer():
    """Training/validation driver for the UPT family of HOI models.

    The concrete loop is selected by ``cfg.MODEL.NAME``. Training logs
    progress on the main DDP process; validation caches V-COCO style
    detection results to ``<output_dir>/cache.pkl`` and can score them
    with the V-COCO evaluator.
    """

    def __init__(self, args, cfg):
        # args: runtime namespace; ``args.gpu`` is used in error messages.
        # cfg: config providing MODEL.NAME, MODEL.CLIP_MAX_NORM, PRINT_FREQ
        #      and DATASET.ROOT.
        self.args = args
        self.cfg = cfg
        # Gradient clipping threshold; values <= 0 disable clipping.
        self.max_norm = self.cfg.MODEL.CLIP_MAX_NORM

    def train_one_epoch(self, train_loader, model, criterion, optimizer, epoch, output_dir=None, writer_dict=None):
        """Run a single training epoch for the configured model.

        Raises:
            TypeError: if ``cfg.MODEL.NAME`` is not a supported model.
        """
        model_name = self.cfg.MODEL.NAME.upper()
        # BUGFIX: these branches were two independent ``if`` statements, so a
        # plain UPT run also fell into the error branch, and the TypeError was
        # constructed but never raised. Use elif and raise explicitly.
        if model_name in ['UPT', 'UPT_FASTERRCNN']:
            self.train_upt(train_loader, model, criterion, optimizer, epoch, output_dir, writer_dict)
        elif model_name == 'UPT_POSE':
            self.train_upt_pose(train_loader, model, criterion, optimizer, epoch, output_dir, writer_dict)
        else:
            raise TypeError(f"Do not supported {model_name}'s train now!!")

    def validate(self, val_loader, val_dataset, model, output_dir):
        """Run validation and return the cached results dict.

        Raises:
            TypeError: if ``cfg.MODEL.NAME`` is not a supported model.
        """
        model_name = self.cfg.MODEL.NAME.upper()
        if model_name in ['UPT', 'UPT_FASTERRCNN', 'UPT_POSE']:
            return self.validate_upt(val_loader, val_dataset, model, output_dir)
        # BUGFIX: the TypeError was previously constructed but never raised.
        raise TypeError(f"Do not supported {model_name}'s validate now!!")

    def train_upt(self, train_loader, model, criterion, optimizer, epoch, output_dir, writer_dict):
        """One epoch of the plain UPT training loop.

        Logs timing and loss statistics every ``cfg.PRINT_FREQ`` iterations
        on the main DDP process.
        """
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()

        # switch to train mode
        model.train()

        end = time.time()
        for i, (images, targets) in enumerate(train_loader):
            # Time spent waiting on the data loader.
            data_time.update(time.time() - end)

            images = relocate_to_cuda(images, non_blocking=True)
            targets = relocate_to_cuda(targets, non_blocking=True) # [sum(length), 17, h, w]

            boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes = model(images, targets)     # [bs*N, 17, h, w]
            loss = criterion(boxes, bh, bo, logits, prior, targets)
            # Fail fast on divergence so a bad run does not silently continue.
            if loss.isnan():
                raise ValueError(f"The HOI loss is NaN for rank {self.args.gpu}")

            losses.update(loss.item(), len(images))

            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            if self.max_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), self.max_norm)
            optimizer.step()

            # Total iteration time (data loading + forward + backward).
            batch_time.update(time.time() - end)
            end = time.time()

            if ddp_opx.is_main_process() and i % self.cfg.PRINT_FREQ == 0:
                msg_dict = {
                    'epoch': epoch,
                    'now': i,
                    'length': len(train_loader),
                    'batch_time': batch_time,
                    'speed': len(images) / batch_time.val,
                    'data_time': data_time,
                    'losses': losses
                }
                self.log_records('train', writer_dict, **msg_dict)

    def train_upt_pose(self, train_loader, model, criterion, optimizer, epoch, output_dir, writer_dict):
        """One epoch of UPT training with an auxiliary keypoint loss.

        The model returns an extra ``keypoints_loss`` term which is added to
        the HOI criterion loss; both components are tracked separately when
        the keypoint loss is active.
        """
        batch_time = AverageMeter()
        data_time = AverageMeter()
        loss_hoi = AverageMeter()
        loss_keypoints = AverageMeter()
        losses = AverageMeter()

        # switch to train mode
        model.train()

        end = time.time()
        for i, (images, targets) in enumerate(train_loader):
            # Time spent waiting on the data loader.
            data_time.update(time.time() - end)

            images = relocate_to_cuda(images, non_blocking=True)
            targets = relocate_to_cuda(targets, non_blocking=True) # [sum(length), 17, h, w]

            boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes, keypoints_loss = model(images, targets)     # [bs*N, 17, h, w]
            hoi_loss = criterion(boxes, bh, bo, logits, prior, targets)

            # Fail fast on divergence so a bad run does not silently continue.
            if hoi_loss.isnan():
                raise ValueError(f"The HOI loss is NaN for rank {self.args.gpu}")

            loss = hoi_loss + keypoints_loss

            loss_hoi.update(hoi_loss.item(), len(images))
            # keypoints_loss may be a plain 0 when the pose head is inactive.
            if keypoints_loss != 0:
                loss_keypoints.update(keypoints_loss.item(), len(images))
            losses.update(loss.item(), len(images))

            optimizer.zero_grad(set_to_none=True)
            loss.backward()
            if self.max_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), self.max_norm)
            optimizer.step()

            # Total iteration time (data loading + forward + backward).
            batch_time.update(time.time() - end)
            end = time.time()

            if ddp_opx.is_main_process() and i % self.cfg.PRINT_FREQ == 0:
                msg_dict = {
                    'epoch': epoch,
                    'now': i,
                    'length': len(train_loader),
                    'batch_time': batch_time,
                    'speed': len(images) / batch_time.val,
                    'data_time': data_time,
                    'losses': losses
                }
                if keypoints_loss != 0:
                    msg_dict['loss_hoi'] = loss_hoi
                    msg_dict['loss_keypoints'] = loss_keypoints
                self.log_records('train', writer_dict, **msg_dict)

    @torch.no_grad()
    def validate_upt(self, val_loader, val_dataset, model, output_dir):
        """Run UPT inference and cache V-COCO formatted detections.

        Returns:
            dict mapping image_id -> list[CacheTemplate], also pickled to
            ``<output_dir>/cache.pkl``.
        """
        batch_time = AverageMeter()

        # switch to evaluate mode
        model.eval()
        all_results = {}
        end = time.time()
        for i, batch in enumerate(val_loader):
            inputs = relocate_to_cuda(batch[0])
            targets = relocate_to_cuda(batch[1])

            boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes = model(inputs, targets, is_train=False)
            outputs = model.postprocessing(boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes)

            # Skip images without detections
            if outputs is None or len(outputs) == 0:
                continue

            for idx, output_item in enumerate(outputs):
                output = relocate_to_cpu(output_item, ignore=True)

                # Format detections
                boxes = output['boxes']

                boxes_h, boxes_o = boxes[output['pairing']].unbind(0)
                scores = output['scores']
                actions = output['labels']

                # Rescale the boxes to the original image size.
                image_id = targets[idx]['image_id']
                ow, oh = targets[idx]['ori_size']
                h, w = output['size']
                scale_fct = torch.as_tensor([
                    ow / w, oh / h, ow / w, oh / h
                ]).unsqueeze(0)
                boxes_h *= scale_fct
                boxes_o *= scale_fct

                for bh, bo, s, a in zip(boxes_h, boxes_o, scores, actions):
                    # Action names are '<verb> <role>'; V-COCO keys join them
                    # with underscores.
                    a_name = val_dataset.actions[a].split()
                    result = CacheTemplate(person_box=bh.tolist())
                    result[a_name[0] + '_agent'] = s.item()
                    result['_'.join(a_name)] = bo.tolist() + [s.item()]
                    if image_id in all_results:
                        all_results[image_id].append(result)
                    else:
                        all_results[image_id] = [result]

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if ddp_opx.is_main_process() and i % self.cfg.PRINT_FREQ == 0:
                msg = 'Test: [{0}/{1}]\t' \
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                        i, len(val_loader), batch_time=batch_time)
                logger.info(msg)

        # BUGFIX: use exist_ok to avoid an exists()/makedirs() race under DDP.
        os.makedirs(output_dir, exist_ok=True)
        with open(os.path.join(output_dir, 'cache.pkl'), 'wb') as f:
            # BUGFIX: the comment promised protocol 2 (Python 2 compatible)
            # but the default protocol was used; pass it explicitly.
            pickle.dump(all_results, f, protocol=2)
        return all_results

    def evaluate_hoi(self, data, writer_dict=None):
        """Score cached detections with the V-COCO evaluator.

        Args:
            data: dict of cached results as produced by ``validate_upt``.
            writer_dict: optional tensorboard writer state; AP values are
                logged under 'validate' when provided.
        """
        assert type(data) is dict
        from utils.vcoco_eval import VCOCOeval
        root = self.cfg.DATASET.ROOT
        vsrl_annot_file = os.path.join(root, 'annotations/vcoco_test.json')
        coco_file = os.path.join(root, 'annotations/instances_vcoco_all_2014.json')
        split_file = os.path.join(root, 'annotations/vcoco_test.ids')
        vcocoeval = VCOCOeval(vsrl_annot_file, coco_file, split_file)
        ap_dict = vcocoeval._do_eval_ap(data, ovr_thresh=0.5)
        if writer_dict:
            # BUGFIX: ap_dict was passed positionally, which raised a
            # TypeError against log_records(mode, writer_dict, **kwargs).
            self.log_records('validate', writer_dict, **ap_dict)

    def log_records(self, mode, writer_dict, **kwargs):
        """Log progress to the logger and the tensorboard writer.

        In 'train' mode, expects epoch/now/length/batch_time/speed/data_time/
        losses entries (AverageMeters where applicable); every key containing
        'loss' is written as a scalar. Any other mode writes all kwargs as a
        single 'validate' scalar group.
        """
        writer = writer_dict['writer']
        if mode == 'train':
            msg = 'Epoch: [{0}][{1}/{2}]\t' \
                'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
                'Speed {speed:.1f} samples/s\t' \
                'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
                'Loss {loss.val:.5f} ({loss.avg:.5f})'.format(
                    kwargs['epoch'], kwargs['now'], kwargs['length'],
                    batch_time=kwargs['batch_time'], speed=kwargs['speed'],
                    data_time=kwargs['data_time'], loss=kwargs['losses'])
            logger.info(msg)

            global_steps = writer_dict['train_global_steps']
            for key, item in kwargs.items():
                if 'loss' in key:
                    writer.add_scalar(f'train/{key}', item.val, global_steps)

            writer_dict['train_global_steps'] = global_steps + 1
        else:
            global_steps = writer_dict['valid_global_steps']
            writer.add_scalars(
                'validate',
                kwargs,
                global_steps
            )
            writer_dict['valid_global_steps'] = global_steps + 1


class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        if self.count != 0:
            self.avg = self.sum / self.count
        else:
            self.avg = 0


# NOTE(review): these imports sit mid-file, next to the relocate_* helpers
# that use them, rather than at the top of the module.
from torch import Tensor
from typing import Optional, Union, List, Tuple, Dict, TypeVar
# Constrained TypeVar: a Tensor or a list/tuple/dict container of Tensors.
# Lets the relocate_* helpers declare "same container shape in, same out".
GenericTensor = TypeVar('GenericTensor', Tensor, List[Tensor], Tuple[Tensor, ...], Dict[str, Tensor])

def relocate_to_device(
        x: GenericTensor, ignore: bool = False,
        device: Optional[Union[torch.device, str, int]] = None,
        **kwargs
    ) -> GenericTensor:
    """
    Relocate data to specified device recursively

    Parameters:
    -----------
    x: Tensor, List[Tensor], Tuple[Tensor] or Dict[Tensor]
        Generic tensor data to be relocated
    device: torch.device, str or int
        Destination device
    ignore: bool
        If True, unsupported data types are returned unchanged instead of
        raising.
    kwargs: dict
        Refer to torch.Tensor.to() for keyworded arguments

    Returns:
    --------
    Tensor, List[Tensor], Tuple[Tensor] or Dict[Tensor]
        Relocated tensor data

    Raises:
    -------
    TypeError
        If x (or a nested element) has an unsupported type and ignore=False.
    """
    if isinstance(x, torch.Tensor):
        return x.to(device, **kwargs)
    elif x is None:
        return x
    elif isinstance(x, list):
        return [relocate_to_device(item, ignore, device, **kwargs) for item in x]
    elif isinstance(x, tuple):
        return tuple(relocate_to_device(item, ignore, device, **kwargs) for item in x)
    elif isinstance(x, dict):
        # NOTE: dicts are mutated in place (and also returned).
        for key in x:
            x[key] = relocate_to_device(x[key], ignore, device, **kwargs)
        return x
    elif not ignore:
        raise TypeError('Unsupported type of data {}'.format(type(x)))
    else:
        # BUGFIX: previously fell through and returned None, silently
        # dropping the data when ignore=True; pass it through unchanged.
        return x


def relocate_to_cuda(
        x: GenericTensor, ignore: bool = False,
        device: Optional[Union[torch.device, int]] = None,
        **kwargs
    ) -> GenericTensor:
    """
    Relocate data to CUDA recursively

    Parameters:
    -----------
    x: Tensor, List[Tensor], Tuple[Tensor] or Dict[Tensor]
        Generic tensor data to be relocated
    ignore: bool
        If True, unsupported data types are returned unchanged instead of
        raising.
    device: torch.device or int
        Destination device
    kwargs: dict
        Refer to torch.Tensor.cuda() for keyworded arguments

    Returns:
    --------
    Tensor, List[Tensor], Tuple[Tensor] or Dict[Tensor]
        Relocated tensor data

    Raises:
    -------
    TypeError
        If x (or a nested element) has an unsupported type and ignore=False.
    """
    if isinstance(x, torch.Tensor):
        return x.cuda(device, **kwargs)
    elif x is None:
        return x
    elif isinstance(x, list):
        return [relocate_to_cuda(item, ignore, device, **kwargs) for item in x]
    elif isinstance(x, tuple):
        return tuple(relocate_to_cuda(item, ignore, device, **kwargs) for item in x)
    elif isinstance(x, dict):
        # NOTE: dicts are mutated in place (and also returned).
        for key in x:
            x[key] = relocate_to_cuda(x[key], ignore, device, **kwargs)
        return x
    elif not ignore:
        # BUGFIX: a stray `return x` made this raise unreachable, so
        # unsupported types passed through silently even with ignore=False
        # (and ignore=True returned None). Restore the documented contract,
        # matching relocate_to_device.
        raise TypeError('Unsupported type of data {}'.format(type(x)))
    else:
        return x


def relocate_to_cpu(x: GenericTensor, ignore: bool = False) -> GenericTensor:
    """Relocate data to cpu recursively.

    Parameters:
    -----------
    x: Tensor, List[Tensor], Tuple[Tensor] or Dict[Tensor]
        Generic tensor data to be relocated
    ignore: bool
        If True, unsupported data types are returned unchanged instead of
        raising.

    Raises:
    -------
    TypeError
        If x (or a nested element) has an unsupported type and ignore=False.
    """
    if isinstance(x, Tensor):
        return x.cpu()
    elif x is None:
        return x
    elif isinstance(x, list):
        return [relocate_to_cpu(item, ignore=ignore) for item in x]
    elif isinstance(x, tuple):
        return tuple(relocate_to_cpu(item, ignore=ignore) for item in x)
    elif isinstance(x, dict):
        # NOTE: dicts are mutated in place (and also returned).
        for key in x:
            x[key] = relocate_to_cpu(x[key], ignore=ignore)
        return x
    elif not ignore:
        raise TypeError('Unsupported type of data {}'.format(type(x)))
    else:
        # BUGFIX: previously fell through and returned None, silently
        # dropping the data when ignore=True; pass it through unchanged.
        return x
