"""
Unary-pairwise transformer for human-object interaction detection

Fred Zhang <frederic.zhang@anu.edu.au>

The Australian National University
Australian Centre for Robotic Vision
"""

import os
import cv2
import torch
import numpy as np

from torch import nn, Tensor
from typing import Optional, List
from torchvision.transforms import transforms
from torchvision.ops.boxes import batched_nms
from torchvision.ops.boxes import box_iou

from models.upt_pose.interaction_head import InteractionHead
from models.upt_pose.detr import build as build_model
from models.pose.interNet import build_pose_net

from utils import ddp_opx
from utils import box_ops
from utils.misc import nested_tensor_from_tensor_list
from dataset.vcoco_pose import get_affine_transform, affine_transform


class UPT(nn.Module):
    """
    Unary-pairwise transformer with an auxiliary pose-estimation branch.

    Parameters:
    -----------
    cfg:
        Experiment configuration node; the fields read here are the
        MODEL.* and DATASET.* entries referenced in `__init__` and the
        pose-input helpers below.
    detector: nn.Module
        Object detector (DETR)
    postprocessor: nn.Module
        Postprocessor for the object detector
    interaction_head: nn.Module
        Interaction head of the network
    """
    def __init__(self, cfg,
        detector: nn.Module,
        postprocessor: nn.Module,
        interaction_head: nn.Module
    ) -> None:
        super().__init__()
        self.cfg = cfg
        self.detector = detector
        self.postprocessor = postprocessor
        self.interaction_head = interaction_head
        # Auxiliary pose network built from the same config
        self.pose_net = build_pose_net(cfg)

        self.num_joints = cfg.MODEL.NUM_JOINTS      # keypoints per person
        self.human_idx = cfg.DATASET.HUMAN_IDX      # class index of the "human" category
        self.num_classes = cfg.DATASET.NUM_CLASSES  # number of interaction classes

        self.box_score_thresh = cfg.DATASET.BOX_SCORE_THRESH  # min detection score to keep a box
        self.fg_iou_thresh = cfg.DATASET.FG_IOU_THRESH

        # Bounds on the number of human/object instances kept per image
        self.min_instances = cfg.DATASET.MIN_INSTANCES
        self.max_instances = cfg.DATASET.MAX_INSTANCES

    def recover_boxes(self, boxes, size):
        """Convert normalised (cx, cy, w, h) boxes to absolute (x1, y1, x2, y2)
        for an image of the given (h, w) size."""
        boxes = box_ops.box_cxcywh_to_xyxy(boxes)
        h, w = size
        scale_fct = torch.stack([w, h, w, h])
        boxes = boxes * scale_fct
        return boxes

    def prepare_region_proposals(self, results, hidden_states):
        """
        Turn raw detector outputs into per-image region proposals.

        Applies class-wise NMS and a score threshold, then clamps the number
        of human and object instances into [min_instances, max_instances]
        (back-filling with the highest-scoring detections when below the
        minimum, even if those fall under the score threshold).

        Parameters:
        -----------
        results: list of dict
            Per-image detector outputs holding scores, labels and boxes.
        hidden_states: Tensor
            Decoder embeddings aligned with the detections (256-d each).

        Returns:
        --------
        list of dict
            One dict per image with keys `boxes`, `scores`, `labels`,
            `hidden_states`, humans listed before objects.
        """
        region_props = []
        for res, hs in zip(results, hidden_states):
            # NOTE(review): relies on the postprocessor's dict insertion order
            # being (scores, labels, boxes) — verify against the postprocessor.
            sc, lb, bx = res.values()

            # Class-wise NMS with a fixed IoU threshold of 0.5
            keep = batched_nms(bx, sc, lb, 0.5)
            sc = sc[keep].view(-1)
            lb = lb[keep].view(-1)
            bx = bx[keep].view(-1, 4)
            hs = hs[keep].view(-1, 256)

            # Indices (into the NMS-filtered arrays) that clear the score threshold
            keep = torch.nonzero(sc >= self.box_score_thresh).squeeze(1)

            # `hum`/`obj` index the full NMS-filtered arrays, NOT `keep`;
            # the back-fill branches below intentionally draw from them
            is_human = lb == self.human_idx
            hum = torch.nonzero(is_human).squeeze(1)
            obj = torch.nonzero(is_human == 0).squeeze(1)
            n_human = is_human[keep].sum(); n_object = len(keep) - n_human
            # Keep the number of human and object instances in a specified interval
            if n_human < self.min_instances:
                # Too few confident humans: take the top-scoring ones regardless
                # of the score threshold
                keep_h = sc[hum].argsort(descending=True)[:self.min_instances]
                keep_h = hum[keep_h]
            elif n_human > self.max_instances:
                # Too many: keep only the top-scoring max_instances humans
                keep_h = sc[hum].argsort(descending=True)[:self.max_instances]
                keep_h = hum[keep_h]
            else:
                # Within bounds: keep exactly the threshold-passing humans
                keep_h = torch.nonzero(is_human[keep]).squeeze(1)
                keep_h = keep[keep_h]

            if n_object < self.min_instances:
                keep_o = sc[obj].argsort(descending=True)[:self.min_instances]
                keep_o = obj[keep_o]
            elif n_object > self.max_instances:
                keep_o = sc[obj].argsort(descending=True)[:self.max_instances]
                keep_o = obj[keep_o]
            else:
                keep_o = torch.nonzero(is_human[keep] == 0).squeeze(1)
                keep_o = keep[keep_o]

            # Humans first, then objects — downstream code depends on this order
            keep = torch.cat([keep_h, keep_o])

            region_props.append(dict(
                boxes=bx[keep],
                scores=sc[keep],
                labels=lb[keep],
                hidden_states=hs[keep]
            ))

        return region_props

    def postprocessing(self, boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes):
        """
        Assemble final per-image HOI detections from interaction-head outputs.

        `logits` is flat over all pairs in the batch and is split back per
        image using the per-image pair counts taken from `bh`.
        """
        n = [len(b) for b in bh]
        logits = logits.split(n)

        detections = []
        for bx, h, o, lg, pr, obj, attn, size in zip(
            boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes
        ):
            # Fuse the two prior maps; zero entries mark invalid (pair, class)
            # combinations, which nonzero() filters out below
            pr = pr.prod(0)
            x, y = torch.nonzero(pr).unbind(1)
            scores = torch.sigmoid(lg[x, y])
            detections.append(dict(
                boxes=bx, pairing=torch.stack([h[x], o[x]]),
                scores=scores * pr[x, y], labels=y,
                objects=obj[x], attn_maps=attn, size=size
            ))

        return detections

    def forward(self,
        images: List[Tensor],
        targets: Optional[List[dict]] = None,
        is_train: bool = True
    ) -> List[dict]:
        """
        Parameters:
        -----------
        images: List[Tensor]
            Input images in format (C, H, W)
        targets: List[dict], optional
            Human-object interaction targets
        is_train: bool
            When True the returned tuple additionally carries the pose
            keypoint loss (see Returns note below).

        Returns:
        --------
        NOTE(review): despite the annotated List[dict], this method returns
        the raw tuple (boxes, bh, bo, logits, prior, objects, attn_maps,
        image_sizes[, keypoints_loss]); `postprocessing` is what builds the
        dicts described here:
        results: List[dict]
            Detected human-object interactions. Each dict has the following keys:
            `boxes`: torch.Tensor
                (N, 4) Bounding boxes for detected human and object instances
            `pairing`: torch.Tensor
                (2, M) Pairing indices, with human instance preceding the object instance
            `scores`: torch.Tensor
                (M,) Interaction score for each pair
            `labels`: torch.Tensor
                (M,) Predicted action class for each pair
            `objects`: torch.Tensor
                (M,) Predicted object class for each pair
            `attn_maps`: list
                Attention weights in the cooperative and competitive layers
            `size`: torch.Tensor
                (2,) Image height and width
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        image_sizes = torch.as_tensor([
            im.size()[-2:] for im in images
        ], device=images[0].device)

        if isinstance(images, (list, torch.Tensor)):
            images = nested_tensor_from_tensor_list(images)
        # Run DETR manually (backbone -> transformer -> heads) so the decoder
        # hidden states are available for the interaction head
        features, pos = self.detector.backbone(images)

        src, mask = features[-1].decompose()
        assert mask is not None
        hs = self.detector.transformer(self.detector.input_proj(src), mask, self.detector.query_embed.weight, pos[-1])[0]

        outputs_class = self.detector.class_embed(hs)
        outputs_coord = self.detector.bbox_embed(hs).sigmoid()

        # Use only the last decoder layer's predictions
        results = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
        results = self.postprocessor(results, image_sizes)
        region_props = self.prepare_region_proposals(results, hs[-1])

        # Crop detected humans (and, in supervision mode, build heatmap
        # targets) for the pose branch
        pose_input = self.get_pose_input(targets, region_props, self.cfg.MODEL.BOX_TYPE)
        pose_info, keypoints_loss = self.pose_net(pose_input)

        logits, prior, bh, bo, objects, attn_maps = self.interaction_head(
            features[-1].tensors, image_sizes, region_props, pose_info
        )
        boxes = [r['boxes'] for r in region_props]

        if is_train:
            return boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes, keypoints_loss
        else:
            return boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes

    def get_idx_by_iou(self, a, b):
        """ a.shape -> [N, 4] | b.shape -> [M, 4]
        Compute the N x M IoU matrix (N < M) and return the indices into `b`
        whose IoU with some row of `a` reaches the threshold.

        NOTE(review): the returned count is not guaranteed to be exactly N —
        a row of `a` may match zero or several rows of `b`.
        """
        ious = box_iou(a, b)
        # remain_idx = torch.argmax(ious, dim=1, keepdim=False)
        gt = torch.ge(ious, 0.85)   # TODO: hard-coded threshold, should be configurable
        _, remain_idx = torch.nonzero(gt).unbind(1)
        return remain_idx
    
    def get_pose_input(self, targets, region_props, mode='detection'):
        """
        Prepare per-image inputs for the pose network.

        mode == 'detection': crop humans from detected boxes only.
        Any other mode ('supervision'): additionally build heatmap targets
        from ground-truth joints and match them against detected humans.
        """
        self.image_size = np.array(self.cfg.MODEL.IMAGE_SIZE)
        self.heatmap_size = np.array(self.cfg.MODEL.HEATMAP_SIZE)
        # ImageNet mean/std normalisation applied to every pose crop
        self.normalize_img = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
            )
        ])

        if mode == 'detection':
            return self.get_pose_input_detection(targets, region_props)
        else:
            return self.get_pose_input_supervision(targets, region_props)

    def get_pose_input_detection(self, targets, region_props):
        """
        Crop every detected human box out of the raw image (via an affine
        warp to the pose-net input resolution) and normalise it.

        Returns one entry per image: a dict with key `input` holding the
        stacked crops, or None when the image has no pose annotation or no
        detected humans.
        """
        pose_input = []
        device = region_props[0]['boxes'].device
        for prop, target in zip(region_props, targets):
            if 'pose' not in target:
                pose_input.append(None)
                continue
            pose_anno = target['pose']
            # Raw image as a numpy array (used by cv2.warpAffine below)
            data_numpy = pose_anno['img']

            # NOTE(review): humans are selected with label == 0 here, while
            # prepare_region_proposals uses self.human_idx — confirm they agree
            labels = prop['labels']
            is_human = labels == 0
            h_idx = torch.nonzero(is_human).squeeze(1)
            if h_idx.shape[0] == 0:
                pose_input.append(None)
                continue
            detect_h_bboxes = prop['boxes'][h_idx]
            
            inputs = []
            for box in detect_h_bboxes:
                x1, y1, x2, y2 = box.cpu()
                w = np.abs(x2 - x1)
                h = np.abs(y2 - y1)
                # Box -> (center, scale) in HRNet convention, then warp
                c, s = self._xywh2cs(x1, y1, w, h)
                trans = get_affine_transform(c, s, 0, self.image_size)

                input = cv2.warpAffine(
                    data_numpy,
                    trans,
                    (int(self.image_size[0]), int(self.image_size[1])),
                    flags=cv2.INTER_LINEAR)
                inputs.append(self.normalize_img(input))
            pose_input.append({
                'input': torch.stack(inputs, dim=0).to(device)
            })
        
        return pose_input

    def get_pose_input_supervision(self, targets, region_props):
        """
        Build supervised pose inputs: crops, heatmap targets and target
        weights from ground-truth joints, then keep only those pose entries
        whose boxes match a detected human (IoU-based, see get_idx_by_iou).

        Returns one entry per image: None when the image has no pose
        annotation; otherwise a dict with `input`, `target`, `target_weight`
        and `hoi_with_pose_idx` (index of the matched detected human per
        pose entry, or None when nothing matched).
        """
        pose_input = []
        device = targets[0]['boxes_h'].device
        # => Build the pose input, target and target_weight per image
        for idx, target in enumerate(targets):
            if 'pose' not in target:
                pose_input.append(None)
                continue
            pose_anno = target['pose']
            boxes_pose, data_numpy = pose_anno['boxes_pose'], pose_anno['img']
            joints_anno, joints_vis_anno = pose_anno['joints'], pose_anno['joints_vis']
            
            inputs, target_list, target_weight_list = [], [], []
            for box, joints, joints_vis in zip(boxes_pose, joints_anno, joints_vis_anno):
                joints, joints_vis = joints.cpu().numpy(), joints_vis.cpu().numpy()
                x1, y1, x2, y2 = box.cpu()
                w = np.abs(x2 - x1)
                h = np.abs(y2 - y1)
                c, s = self._xywh2cs(x1, y1, w, h)

                joints_heatmap = joints.copy()
                # Two affine maps: one to the input crop, one to heatmap space
                trans = get_affine_transform(c, s, 0, self.image_size)
                trans_heatmap = get_affine_transform(c, s, 0, self.heatmap_size)

                input = cv2.warpAffine(
                    data_numpy,
                    trans,
                    (int(self.image_size[0]), int(self.image_size[1])),
                    flags=cv2.INTER_LINEAR)
                for i in range(self.num_joints):
                    if joints_vis[i, 0] > 0.0:  # transform visible joints along with the input crop
                        joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
                        joints_heatmap[i, 0:2] = affine_transform(joints_heatmap[i, 0:2], trans_heatmap)

                inputs.append(self.normalize_img(input))
                # NOTE(review): `target` shadows the outer loop variable;
                # harmless since the annotation dict is not read again below
                target, target_weight = self.generate_target(joints_heatmap, joints_vis)
                target_list.append(torch.from_numpy(target))
                target_weight_list.append(torch.from_numpy(target_weight))
                # cv2.imwrite(f'hoi_human_cv2_{i+1}.png', input)
            
            pose_input.append({
                'boxes_pose': boxes_pose,
                'input': torch.stack(inputs, dim=0).to(device),
                'target': torch.stack(target_list, dim=0).to(device),
                'target_weight': torch.stack(target_weight_list, dim=0).to(device),
            })
        
        # => Filter the pose data against the DETR detection results
        for b_idx, pose_item in enumerate(pose_input):
            if pose_item is None:
                continue

            prop = region_props[b_idx]
            detect_h_bboxes, scores, labels = prop['boxes'], prop['scores'], prop['labels']
            is_human = labels == 0
            h_idx = torch.nonzero(is_human).squeeze(1)
            detect_h_bboxes, scores = detect_h_bboxes[h_idx], scores[h_idx]
            pose_boxes = pose_item['boxes_pose']
            # DETR human order is unchanged; only keep/update the pose data
            pose_remain_idx = self.get_idx_by_iou(detect_h_bboxes, pose_boxes)
            if pose_remain_idx.shape[0] == 0:   # no pose box matched any detected human box
                pose_item['hoi_with_pose_idx'] = None 
                continue

            pose_item['input'] = pose_item['input'][pose_remain_idx]
            pose_item['target'] = pose_item['target'][pose_remain_idx]
            pose_item['target_weight'] = pose_item['target_weight'][pose_remain_idx]
            pose_item['boxes_pose'] = pose_item['boxes_pose'][pose_remain_idx]

            # print(f'detect_h_bboxes:{detect_h_bboxes.shape[0]} pose_boxes={pose_boxes.shape[0]} | remain_idx={pose_remain_idx.shape[0]}')
            
            # pop() drops `boxes_pose` from the dict once the matching is done
            hoi_pose_idx = self.get_idx_by_iou(pose_item.pop('boxes_pose'), detect_h_bboxes)
            pose_item['hoi_with_pose_idx'] = hoi_pose_idx   # which detected human each pose entry corresponds to
            pose_input[b_idx] = pose_item

        return pose_input

    def _xywh2cs(self, x, y, w, h):
        """
        Convert an (x, y, w, h) box into the (center, scale) pair used by the
        affine-transform helpers: scale is measured in units of pixel_std
        (200 px); the box is first expanded to the target aspect ratio and
        then inflated by 25%.
        """
        self.aspect_ratio = self.cfg.MODEL.IMAGE_SIZE[0] * 1.0 / self.cfg.MODEL.IMAGE_SIZE[1]
        self.pixel_std = 200
        center = np.zeros((2), dtype=np.float32)
        center[0] = x + (w - 1) * 0.5
        center[1] = y + (h - 1) * 0.5
        # Pad the shorter side so the box matches the input aspect ratio
        if w > self.aspect_ratio * h:
            h = w * 1.0 / self.aspect_ratio
        elif w < self.aspect_ratio * h:
            w = h * self.aspect_ratio
        scale = np.array(
            [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],
            dtype=np.float32)
        if center[0] != -1:
            scale = scale * 1.25
        return center, scale
    
    def generate_target(self, joints, joints_vis):
        '''
        Render one Gaussian heatmap per joint (in heatmap coordinates).

        :param joints:  [num_joints, 3]
        :param joints_vis: [num_joints, 3]
        :return: target, target_weight(1: visible, 0: invisible)
        '''
        target_weight = np.ones((self.num_joints, 1), dtype=np.float32)
        target_weight[:, 0] = joints_vis[:, 0]

        assert self.cfg.MODEL.TARGET_TYPE == 'gaussian', \
            'Only support gaussian map now!'

        if self.cfg.MODEL.TARGET_TYPE == 'gaussian':
            target = np.zeros((self.num_joints,
                               self.heatmap_size[1],
                               self.heatmap_size[0]),
                              dtype=np.float32)

            # Gaussian support radius: 3 sigma on each side
            tmp_size = self.cfg.MODEL.SIGMA * 3

            for joint_id in range(self.num_joints):
                # Zero the weight when the Gaussian falls fully off the heatmap
                target_weight[joint_id] = \
                    self.adjust_target_weight(joints[joint_id], target_weight[joint_id], tmp_size)
                
                if target_weight[joint_id] == 0:
                    continue

                mu_x = joints[joint_id][0]
                mu_y = joints[joint_id][1]
                
                x = np.arange(0, self.heatmap_size[0], 1, np.float32)
                y = np.arange(0, self.heatmap_size[1], 1, np.float32)
                y = y[:, np.newaxis]

                v = target_weight[joint_id]
                if v > 0.5:
                    # Dense unnormalised Gaussian centred at (mu_x, mu_y)
                    target[joint_id] = np.exp(- ((x - mu_x) ** 2 + (y - mu_y) ** 2) / (2 * self.cfg.MODEL.SIGMA ** 2))

        if self.cfg.LOSS.USE_DIFFERENT_JOINTS_WEIGHT:
            # NOTE(review): multiplying by 1 is a no-op — per-joint weights
            # appear unimplemented here; confirm against the pose config
            target_weight = np.multiply(target_weight, 1)

        return target, target_weight

    def adjust_target_weight(self, joint, target_weight, tmp_size):
        """Return 0 when the Gaussian centred at `joint` lies entirely
        outside the heatmap, otherwise pass `target_weight` through."""
        # feat_stride = self.image_size / self.heatmap_size
        mu_x = joint[0]
        mu_y = joint[1]
        # Check that any part of the gaussian is in-bounds
        ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
        br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
        if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
                or br[0] < 0 or br[1] < 0:
            # If not, just return the image as is
            target_weight = 0

        return target_weight



def build_detector(cfg, args, class_corr):
    """
    Build the UPT detector with its DETR backbone and interaction head.

    Parameters:
    -----------
    cfg:
        Experiment configuration (MODEL.* / DATASET.* fields).
    args:
        Extra arguments forwarded to the DETR builder.
    class_corr:
        Class-correspondence structure forwarded to the interaction head.

    Returns:
    --------
    UPT
        Assembled detector, optionally initialised from pretrained weights.
    """
    detr, _, postprocessors = build_model(cfg, args)
    if os.path.exists(cfg.MODEL.PRETRAINED):
        if ddp_opx.is_main_process():
            print(f"Load weights for the object detector from {cfg.MODEL.PRETRAINED}")
        detr.load_state_dict(torch.load(cfg.MODEL.PRETRAINED, map_location='cpu')['model_state_dict'])
    # Binary interaction classifier over paired (human, object) representations
    predictor = torch.nn.Linear(cfg.MODEL.REPR_DIM * 2, cfg.DATASET.NUM_CLASSES)
    interaction_head = InteractionHead(
        predictor, cfg.MODEL.HIDDEN_DIM, cfg.MODEL.REPR_DIM,
        detr.backbone[0].num_channels,
        cfg.DATASET.NUM_CLASSES, cfg.DATASET.HUMAN_IDX, class_corr
    )
    detector = UPT(cfg,
        detr, postprocessors['bbox'], interaction_head
    )
    if not cfg.MODEL.SCRATCH:
        # Warm-start from a pretrained UPT checkpoint; strict=False because
        # the pose branch has no weights in that checkpoint
        upt_checkpoint = 'checkpoints/upt-r50-vcoco.pt'
        detector.load_state_dict(torch.load(upt_checkpoint, map_location='cpu'), strict=False)
        if ddp_opx.is_main_process():
            # Report the checkpoint actually loaded (the old message wrongly
            # named cfg.MODEL.PRETRAINED here)
            print(f"Load weights {upt_checkpoint} to finetune with pose")
    return detector
