import torch
import torchvision
import numpy as np
from torch import nn
import torchvision.models.detection

# imports for RCNN modules
from typing import Any, Optional
from torchvision.models.detection import KeypointRCNN

from torchvision.ops import MultiScaleRoIAlign

from torchvision.ops import misc as misc_nn_ops
from torchvision.models.resnet import resnet50
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone, _validate_trainable_layers

def keypointrcnn_resnet50_fpn(
    *,
    weights=None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    num_keypoints: Optional[int] = None,
    weights_backbone=None,
    trainable_backbone_layers: Optional[int] = None,
    pretrained_weights_path: str = './pretrained_weights/keypointrcnn_resnet50_fpn_coco-fc266e95.pth',
    **kwargs: Any,
) -> KeypointRCNN:
    """Build a KeypointRCNN with a ResNet-50-FPN backbone and initialize it
    from a locally stored COCO checkpoint, excluding the box-predictor head.

    Args:
        weights: unused; kept for signature compatibility with torchvision.
        progress: unused; kept for signature compatibility with torchvision.
        num_classes: number of detection classes. Defaults to 11 when ``None``.
        num_keypoints: keypoints per instance. Defaults to 17 (COCO) when ``None``.
        weights_backbone: unused; the backbone is always built non-pretrained
            (weights come from the local checkpoint instead).
        trainable_backbone_layers: number of trainable ResNet stages (0-5);
            resolved by ``_validate_trainable_layers`` when ``None``.
        pretrained_weights_path: path to the serialized COCO checkpoint.
        **kwargs: forwarded verbatim to ``KeypointRCNN``.

    Returns:
        KeypointRCNN: model whose backbone and keypoint head carry the COCO
        weights while the box predictor stays freshly initialized.
    """
    # Bug fix: the original unconditionally overwrote these with 11/17,
    # silently discarding whatever the caller passed in.
    if num_classes is None:
        num_classes = 11
    if num_keypoints is None:
        num_keypoints = 17

    # No torchvision-hub downloads here: all weights come from the local
    # checkpoint loaded below, so both "pretrained" flags stay False.
    pretrained = False
    pretrained_backbone = False
    trainable_backbone_layers = _validate_trainable_layers(
        pretrained or pretrained_backbone, trainable_backbone_layers, 5, 3)
    backbone = resnet_fpn_backbone(
        'resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers)
    model = KeypointRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)

    # The box-predictor tensors are shaped by num_classes, which differs from
    # the checkpoint's COCO person/background setup, so they must be skipped.
    exclude_keys = [
        'roi_heads.box_predictor.cls_score.weight',
        'roi_heads.box_predictor.cls_score.bias',
        'roi_heads.box_predictor.bbox_pred.weight',
        'roi_heads.box_predictor.bbox_pred.bias',
    ]

    # map_location='cpu' lets a GPU-saved checkpoint load on CPU-only hosts;
    # parameters are moved to the right device later by the caller.
    pretrained_weights = torch.load(pretrained_weights_path, map_location='cpu')
    pretrained_dict = {k: v for k, v in pretrained_weights.items() if k not in exclude_keys}
    # strict=False because the excluded head keys are intentionally missing.
    model.load_state_dict(pretrained_dict, strict=False)

    return model



class LyPoseModel(nn.Module):
    """Thin wrapper around torchvision's ``KeypointRCNN`` (ResNet-50-FPN).

    Delegates everything to ``keypointrcnn_resnet50_fpn``; ``forward``
    switches the underlying model between train and eval mode based on
    whether ground-truth targets are supplied.
    """

    def __init__(self,
        num_classes=11,  # NOTE(review): original comment said "person and no_person",
                         # which would be 2, not 11 — confirm the intended class count.
        *args, **kwargs
    ):
        super().__init__()
        self.kp_model = keypointrcnn_resnet50_fpn(
            num_classes=num_classes,
        )

    def forward(self, images, targets=None):
        """Run training or inference depending on whether targets are given.

        When training (``targets is not None``):
            inputs:
                images: tensor(s) accepted by KeypointRCNN
                targets: per-image dictionaries with
                    - boxes (``FloatTensor[N, 4]``): ground-truth boxes in
                      ``[x1, y1, x2, y2]`` format, with ``0 <= x1 < x2 <= W``
                      and ``0 <= y1 < y2 <= H``.
                    - labels (``Int64Tensor[N]``): class label per box
                    - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoint
                      locations per instance, in ``[x, y, visibility]`` format,
                      where ``visibility=0`` means the keypoint is not visible.
            returns:
                Dict[str, Tensor] of classification, regression and keypoint losses.
        When inference (``targets is None``):
            inputs:
                images: tensor(s) accepted by KeypointRCNN
            returns per detection:
                - boxes (``FloatTensor[N, 4]``): predicted boxes in
                  ``[x1, y1, x2, y2]`` format
                - labels (``Int64Tensor[N]``): predicted label per instance
                - scores (``Tensor[N]``): score per instance
                - keypoints (``FloatTensor[N, K, 3]``): predicted keypoint
                  locations in ``[x, y, v]`` format
        """
        # Bug fix: explicit None check — the original used truthiness, so an
        # empty targets container silently selected the inference path.
        if targets is not None:
            self.kp_model.train()
            return self.kp_model(images, targets)
        self.kp_model.eval()
        return self.kp_model(images)

