"""
Unary-pairwise transformer for human-object interaction detection

Fred Zhang <frederic.zhang@anu.edu.au>

The Australian National University
Australian Centre for Robotic Vision
"""

import os
import torch
import torch.distributed as dist


from torch import nn, Tensor
from typing import Optional, List
from torchvision.ops.boxes import batched_nms, box_iou

from models.upt_fasterrcnn.interaction_head import InteractionHead
from models.upt_fasterrcnn.faster_rcnn import fasterrcnn_resnet_fpn
from utils.misc import nested_tensor_from_tensor_list
from utils import ddp_opx

from typing import Optional, List, Tuple
from torchvision.models.detection import transform
import logging

logger = logging.getLogger(__name__)

class UPT_Fasterrcnn(nn.Module):
    """
    Unary-pairwise transformer for HOI detection, driven by a Faster R-CNN
    FPN backbone (features only) instead of DETR.

    Parameters:
    -----------
    cfg:
        Experiment config; the DATASET.* fields read in ``__init__`` must exist.
    backbone: nn.Module
        Convolutional backbone mapping a padded image batch to feature maps
    interaction_head: nn.Module
        Interaction head that scores human-object pairs
    alpha: float
        Focal-loss balancing hyper-parameter
    gamma: float
        Focal-loss focusing hyper-parameter
    """
    def __init__(self, cfg,
        backbone: nn.Module,
        interaction_head: nn.Module,
        alpha: float = 0.5, gamma: float = 2.0,
    ) -> None:
        super().__init__()
        self.cfg = cfg
        self.backbone = backbone
        self.interaction_head = interaction_head

        # Index of the "human" class in the detector's label space
        self.human_idx = cfg.DATASET.HUMAN_IDX
        # Number of interaction (action) classes
        self.num_classes = cfg.DATASET.NUM_CLASSES

        # Detections below this score are dropped in prepare_region_proposals
        self.box_score_thresh = cfg.DATASET.BOX_SCORE_THRESH
        # Minimum IoU for a detected pair to be matched to a ground-truth pair
        self.fg_iou_thresh = cfg.DATASET.FG_IOU_THRESH

        # Bounds on the number of human/object instances kept per image
        self.min_instances = cfg.DATASET.MIN_INSTANCES
        self.max_instances = cfg.DATASET.MAX_INSTANCES

        self.alpha = alpha
        self.gamma = gamma

    def associate_with_ground_truth(self, boxes_h, boxes_o, targets):
        """
        Build the (N, num_classes) binary label matrix for N detected pairs.

        A pair (boxes_h[i], boxes_o[i]) is a positive for action class
        targets['labels'][j] when BOTH its human and object boxes overlap
        the j-th ground-truth pair with IoU >= fg_iou_thresh.
        """
        n = boxes_h.shape[0]
        labels = torch.zeros(n, self.num_classes, device=boxes_h.device)

        gt_bx_h = targets['boxes_h']
        gt_bx_o = targets['boxes_o']

        # Element-wise min of the two IoU matrices: both boxes must match.
        x, y = torch.nonzero(torch.min(
            box_iou(boxes_h, gt_bx_h),
            box_iou(boxes_o, gt_bx_o)
        ) >= self.fg_iou_thresh).unbind(1)

        labels[x, targets['labels'][y]] = 1

        return labels

    def compute_interaction_loss(self, boxes, bh, bo, logits, prior, targets):
        """
        Focal loss over all valid (non-zero prior) pair/class entries,
        normalised by the (distributed-averaged) number of positives.
        """
        labels = torch.cat([
            self.associate_with_ground_truth(bx[h], bx[o], target)
            for bx, h, o, target in zip(boxes, bh, bo, targets)
        ])
        # Product of the human- and object-score priors; zero entries are
        # invalid pair/class combinations and are filtered out below.
        prior = torch.cat(prior, dim=1).prod(0)
        x, y = torch.nonzero(prior).unbind(1)
        logits = logits[x, y]; prior = prior[x, y]; labels = labels[x, y]

        n_p = len(torch.nonzero(labels))
        if dist.is_initialized():
            # Average the positive count across ranks so every worker
            # normalises by the same denominator.
            world_size = dist.get_world_size()
            n_p = torch.as_tensor([n_p], device='cuda')
            dist.barrier()
            dist.all_reduce(n_p)
            n_p = (n_p / world_size).item()

        # NOTE(review): if no rank has any positive pair, n_p == 0 and the
        # division below yields inf/NaN; the caller raises on NaN losses.
        loss = self.binary_focal_loss_with_logits(
            torch.log(
                prior / (1 + torch.exp(-logits) - prior) + 1e-8
            ), labels, reduction='sum',
            alpha=self.alpha, gamma=self.gamma
        )

        return loss / n_p

    def binary_focal_loss_with_logits(
        self,
        x: Tensor, y: Tensor,
        alpha: float = 0.5,
        gamma: float = 2.0,
        reduction: str = 'mean',
        eps: float = 1e-6
    ) -> Tensor:
        """
        Focal loss by Lin et al.
        https://arxiv.org/pdf/1708.02002.pdf

        L = - |1-y-alpha| * |y-x|^{gamma} * log(|1-y-x|)

        Parameters:
        -----------
        x: Tensor[N, K]
            Post-normalisation scores
        y: Tensor[N, K]
            Binary labels
        alpha: float
            Hyper-parameter that balances between positive and negative examples
        gamma: float
            Hyper-parameter that suppresses well-classified examples
        reduction: str
            Reduction method: 'mean', 'sum' or 'none'
        eps: float
            A small constant to avoid NaN values from 'PowBackward'

        Returns:
        --------
        loss: Tensor
            Computed loss tensor

        Raises:
        -------
        ValueError
            If ``reduction`` is not one of the supported methods.
        """
        loss = (1 - y - alpha).abs() * ((y-torch.sigmoid(x)).abs() + eps) ** gamma * \
            torch.nn.functional.binary_cross_entropy_with_logits(
                x, y, reduction='none'
            )
        if reduction == 'mean':
            return loss.mean()
        elif reduction == 'sum':
            return loss.sum()
        elif reduction == 'none':
            return loss
        else:
            raise ValueError("Unsupported reduction method {}".format(reduction))


    def prepare_region_proposals(self, results, hidden_states):
        """
        Turn raw detector outputs into per-image region proposals.

        Applies class-aware NMS, a score threshold, and then clamps the number
        of human and object instances to [min_instances, max_instances],
        permuting humans to the top of each proposal set.
        """
        region_props = []
        for res, hs in zip(results, hidden_states):
            # assumes res dict iterates as (scores, labels, boxes) — order-dependent
            sc, lb, bx = res.values()

            keep = batched_nms(bx, sc, lb, 0.5)
            sc = sc[keep].view(-1)
            lb = lb[keep].view(-1)
            bx = bx[keep].view(-1, 4)
            hs = hs[keep].view(-1, 256)

            keep = torch.nonzero(sc >= self.box_score_thresh).squeeze(1)

            is_human = lb == self.human_idx
            hum = torch.nonzero(is_human).squeeze(1)
            obj = torch.nonzero(is_human == 0).squeeze(1)
            n_human = is_human[keep].sum(); n_object = len(keep) - n_human
            # Keep the number of human and object instances in a specified interval
            if n_human < self.min_instances:
                # Too few above-threshold humans: take the top-scoring ones
                # regardless of the score threshold.
                keep_h = sc[hum].argsort(descending=True)[:self.min_instances]
                keep_h = hum[keep_h]
            elif n_human > self.max_instances:
                keep_h = sc[hum].argsort(descending=True)[:self.max_instances]
                keep_h = hum[keep_h]
            else:
                keep_h = torch.nonzero(is_human[keep]).squeeze(1)
                keep_h = keep[keep_h]

            if n_object < self.min_instances:
                keep_o = sc[obj].argsort(descending=True)[:self.min_instances]
                keep_o = obj[keep_o]
            elif n_object > self.max_instances:
                keep_o = sc[obj].argsort(descending=True)[:self.max_instances]
                keep_o = obj[keep_o]
            else:
                keep_o = torch.nonzero(is_human[keep] == 0).squeeze(1)
                keep_o = keep[keep_o]

            keep = torch.cat([keep_h, keep_o])

            region_props.append(dict(
                boxes=bx[keep],
                scores=sc[keep],
                labels=lb[keep],
                hidden_states=hs[keep]
            ))

        return region_props

    def postprocessing(self, boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes):
        """
        Convert raw pairwise logits into per-image detection dicts with keys
        boxes, pairing, scores, labels, objects, attn_maps and size.
        """
        # Split the flat logits back into per-image chunks.
        n = [len(b) for b in bh]
        logits = logits.split(n)

        detections = []
        for bx, h, o, lg, pr, obj, attn, size in zip(
            boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes
        ):
            pr = pr.prod(0)
            # Only pair/class entries with a non-zero prior are valid.
            x, y = torch.nonzero(pr).unbind(1)
            scores = torch.sigmoid(lg[x, y])
            detections.append(dict(
                boxes=bx, pairing=torch.stack([h[x], o[x]]),
                scores=scores * pr[x, y], labels=y,
                objects=obj[x], attn_maps=attn, size=size
            ))

        return detections

    def preprocess_boxes_wenjin(self,
        targets: List[dict],
        append_gt: Optional[bool] = None
    ) -> None:
        """
        Single-image variant of preprocess_boxes: build region proposals
        directly from the ground-truth boxes of ``targets[0]``.

        NOTE(review): assumes boxes_h and boxes_o have the same count (uses
        2 * n) — unlike preprocess_boxes, which handles unequal counts.
        """
        results = []
        target = targets[0]
        n = target["boxes_h"].shape[0]
        device = target["boxes_h"].device

        # TODO: negative box coordinates arise when GT boxes are rotated
        # along with the image and overflow the image bounds
        boxes = torch.cat([target["boxes_h"], target["boxes_o"]])
        scores = torch.ones(2 * n, device=device)
        labels = torch.cat([
                self.human_idx * torch.ones(n, device=device).long(),
                target["object"]
            ])

        # Keep a fixed number of detections
        h_idx = torch.nonzero(labels == self.human_idx).squeeze(1)
        o_idx = torch.nonzero(labels != self.human_idx).squeeze(1)
        if len(h_idx) > self.max_instances:
            h_idx = h_idx[:self.max_instances]
        if len(o_idx) > self.max_instances:
            o_idx = o_idx[:self.max_instances]
        # Permute humans to the top
        keep_idx = torch.cat([h_idx, o_idx])
        results.append(dict(
            boxes=boxes[keep_idx].view(-1, 4),
            labels=labels[keep_idx].view(-1),
            scores=scores[keep_idx].view(-1),
            h_num=n
        ))

        return results

    def preprocess_boxes(self,
        targets: List[dict],
        append_gt: Optional[bool] = None
    ) -> None:
        """
        Build region proposals directly from ground-truth boxes.

        For each target, concatenates human and object boxes (humans first),
        assigns unit scores, caps each group at max_instances and records the
        number of human boxes under ``h_num``.
        """
        results = []
        for target in targets:
            n = target["boxes_h"].shape[0]
            m = target["boxes_o"].shape[0]
            device = target["boxes_h"].device
            boxes = torch.cat([target["boxes_h"], target["boxes_o"]])
            scores = torch.ones(n + m, device=device)
            labels = torch.cat([
                    self.human_idx * torch.ones(n, device=device).long(),
                    target["object"]
                ])
            assert boxes.shape[0] == scores.shape[0] == labels.shape[0]
            # Keep a fixed number of detections
            h_idx = torch.nonzero(labels == self.human_idx).squeeze(1)
            o_idx = torch.nonzero(labels != self.human_idx).squeeze(1)
            if len(h_idx) > self.max_instances:
                h_idx = h_idx[:self.max_instances]
            if len(o_idx) > self.max_instances:
                o_idx = o_idx[:self.max_instances]
            # Permute humans to the top
            keep_idx = torch.cat([h_idx, o_idx])
            results.append(dict(
                boxes=boxes[keep_idx].view(-1, 4),
                labels=labels[keep_idx].view(-1),
                scores=scores[keep_idx].view(-1),
                h_num=n
            ))

        return results

    def forward(self, samples):
        """
        Run the model on a batch of samples.

        Parameters:
        -----------
        samples: list
            Each element is either None (no HOI annotation) or a dict holding
            the image tensor under 'input' plus the HOI target fields.

        Returns:
        --------
        detections: list
            Per-sample detection dicts (None for samples that were None)
        unary_h_tokens: list
            Per-sample unary human tokens (None for samples that were None)
        interaction_loss: Tensor or int
            Focal interaction loss in training mode, 0 otherwise

        Raises:
        -------
        ValueError
            If the computed interaction loss is NaN.
        """
        images = []
        targets = []
        none_idx = []
        for i, sample in enumerate(samples):
            if sample is None:
                none_idx.append(i)
                continue

            images.append(sample['input'])
            targets.append(sample)

        # Record the original (H, W) of every image before padding.
        image_sizes = torch.as_tensor([
            im.size()[-2:] for im in images
        ], device=images[0].device)

        if isinstance(images, (list, torch.Tensor)):
            images = nested_tensor_from_tensor_list(images)

        features = self.backbone(images.tensors)

        # Proposals come straight from the ground-truth boxes.
        region_props = self.preprocess_boxes(targets)

        logits, prior, bh, bo, objects, attn_maps, unary_h_tokens = self.interaction_head(
            features, image_sizes, region_props
        )
        boxes = [r['boxes'] for r in region_props]

        interaction_loss = 0
        if self.training:
            interaction_loss = self.compute_interaction_loss(boxes, bh, bo, logits, prior, targets)
            if interaction_loss.isnan():
                raise ValueError("The interaction_loss is NaN!")

        detections = self.postprocessing(boxes, bh, bo, logits, prior, objects, attn_maps, image_sizes)

        # Re-insert placeholders for images without any HOI annotation so the
        # output lists align with the input sample order.
        for idx in none_idx:
            unary_h_tokens.insert(idx, None)
            detections.insert(idx, None)

        return detections, unary_h_tokens, interaction_loss


    def forward_wenjin(self,
        images: List[Tensor],
        targets: Optional[List[dict]] = None
    ) -> List[dict]:
        """
        Per-image variant of forward: runs the backbone and interaction head
        one image at a time, skipping images whose target is None.

        Parameters:
        -----------
        images: List[Tensor]
            Input images in format (C, H, W)
        targets: List[dict], optional
            Human-object interaction targets, aligned with ``images``

        Returns:
        --------
        unary_tokens_list: list
            Per-image unary tokens from the interaction head (None for
            images whose target is None)
        interaction_loss_total: Tensor or int
            Sum of the per-image interaction losses

        Raises:
        -------
        ValueError
            In training mode when ``targets`` is None.
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        image_sizes = torch.as_tensor([
            im.size()[-2:] for im in images
        ], device=images[0].device)

        if isinstance(images, (list, torch.Tensor)):
            images = nested_tensor_from_tensor_list(images)

        region_props = self.preprocess_boxes(targets)

        interaction_loss_total = 0
        unary_tokens_list = []
        for i in range(len(targets)):
            if targets[i] is None:
                unary_tokens_list += [None]
            else:
                features = self.backbone(images.tensors[i:i+1])
                region_props = self.preprocess_boxes([targets[i]])

                # NOTE(review): image_sizes covers the whole batch while the
                # proposals cover a single image — confirm the interaction
                # head indexes sizes correctly in this code path.
                unary_tokens, interaction_loss = self.interaction_head(
                    features, image_sizes, region_props, [targets[i]]
                )
                unary_tokens_list += unary_tokens
                interaction_loss_total += interaction_loss

        # Fixed: previously returned `interaction_loss` (only the last
        # image's loss, and stale/undefined when the final target is None).
        return unary_tokens_list, interaction_loss_total


def build_upt_facter_rcnn(cfg, class_corr):
    """Build a UPT model on a Faster R-CNN backbone, load its pretrained
    weights, freeze every parameter and return it in eval mode."""
    ckpt_path = 'checkpoints/upt-faster-2box-e15.pth'

    detector = fasterrcnn_resnet_fpn(cfg.MODEL.BACKBONE, pretrained=True)

    pair_predictor = torch.nn.Linear(cfg.MODEL.REPR_DIM * 2, cfg.DATASET.NUM_CLASSES)
    head = InteractionHead(
        pair_predictor, cfg.MODEL.HIDDEN_DIM, cfg.MODEL.REPR_DIM,
        256,    # the Faster R-CNN FPN outputs 256-dim features
        cfg.DATASET.NUM_CLASSES, cfg.DATASET.HUMAN_IDX, class_corr
    )
    model = UPT_Fasterrcnn(cfg, detector.backbone, head)

    checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint['model_state_dict'])
    if ddp_opx.is_main_process():
        logger.info('=> load faster_rcnn_upt from pretrained checkpoints/upt-faster-2box-e15.pth')
        logger.info('=> frozen hoi net')

    # The HOI net serves as a fixed feature provider: freeze it entirely.
    for param in model.parameters():
        param.requires_grad = False
    model.eval()
    return model
