import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops import MultiScaleRoIAlign, roi_align
from torchvision.models.detection import _utils as det_utils
from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
from torchvision.models.detection.roi_heads import RoIHeads, fastrcnn_loss, maskrcnn_inference, maskrcnn_loss
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.detection.faster_rcnn import TwoMLPHead, FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNHeads, MaskRCNNPredictor

from collections import OrderedDict
class MaskRCNN(nn.Module):
    """Mask R-CNN assembled from torchvision detection components.

    Wires a feature backbone, a region proposal network (RPN) and the
    box/mask RoI heads behind a ``GeneralizedRCNNTransform`` that handles
    resizing/normalization. In training mode ``forward`` returns a loss
    dict; in eval mode it returns per-image detections.

    Note: ``num_classes`` is accepted for interface compatibility but is
    not used here — the class counts are baked into the supplied
    ``box_predictor`` / ``mask_predictor`` modules.
    """

    def __init__(self, backbone, num_classes, rpn_anchor_generator=None, rpn_head=None,
                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,
                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,
                 rpn_nms_thresh=0.7, rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,
                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,
                 box_roi_pool=None, box_head=None, box_predictor=None,
                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,
                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,
                 box_batch_size_per_image=512, box_positive_fraction=0.25,
                 bbox_reg_weights=None, mask_roi_pool=None, mask_head=None, mask_predictor=None):
        super(MaskRCNN, self).__init__()
        self.backbone = backbone
        # BUG FIX: RegionProposalNetwork expects the pre/post-NMS top-n
        # counts as {'training': ..., 'testing': ...} dicts via the
        # `pre_nms_top_n` / `post_nms_top_n` parameters. The previous code
        # passed nonexistent `*_train` / `*_test` keyword arguments, which
        # raised a TypeError on construction.
        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)
        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)
        self.rpn = RegionProposalNetwork(
            anchor_generator=rpn_anchor_generator,
            head=rpn_head,
            fg_iou_thresh=rpn_fg_iou_thresh,
            bg_iou_thresh=rpn_bg_iou_thresh,
            batch_size_per_image=rpn_batch_size_per_image,
            positive_fraction=rpn_positive_fraction,
            pre_nms_top_n=rpn_pre_nms_top_n,
            post_nms_top_n=rpn_post_nms_top_n,
            nms_thresh=rpn_nms_thresh,
        )
        self.roi_heads = RoIHeads(
            box_roi_pool=box_roi_pool,
            box_head=box_head,
            box_predictor=box_predictor,
            fg_iou_thresh=box_fg_iou_thresh,
            bg_iou_thresh=box_bg_iou_thresh,
            batch_size_per_image=box_batch_size_per_image,
            positive_fraction=box_positive_fraction,
            bbox_reg_weights=bbox_reg_weights,
            score_thresh=box_score_thresh,
            nms_thresh=box_nms_thresh,
            detections_per_img=box_detections_per_img,
            mask_roi_pool=mask_roi_pool,
            mask_head=mask_head,
            mask_predictor=mask_predictor
        )
        # Standard ImageNet normalization and COCO-style resize range.
        self.transform = GeneralizedRCNNTransform(min_size=800, max_size=1333, image_mean=[0.485, 0.456, 0.406], image_std=[0.229, 0.224, 0.225])

    def forward(self, images, targets=None):
        """Run detection.

        Args:
            images: list of 3xHxW tensors (unnormalized; the transform
                handles normalization and batching).
            targets: list of dicts with ground truth (required in training).

        Returns:
            dict of losses in training mode; list of per-image detection
            dicts in eval mode.
        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        # Remember pre-resize sizes so detections can be mapped back.
        original_image_sizes = [img.shape[-2:] for img in images]
        images, targets = self.transform(images, targets)
        features = self.backbone(images.tensors)
        # RPN/RoIHeads expect a feature dict even for single-map backbones.
        if isinstance(features, torch.Tensor):
            features = OrderedDict([('0', features)])
        proposals, proposal_losses = self.rpn(images, features, targets)
        detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
        detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        if self.training:
            return losses
        return detections

def build_backbone():
    """Create a ResNet-50 + FPN feature extractor with pretrained weights.

    Produces 256-channel feature maps on four pyramid levels ('0'-'3')
    plus a 'pool' level, matching the RoI pool configs below.
    """
    # NOTE(review): `pretrained=` was deprecated/removed in newer
    # torchvision (>= 0.13) in favor of `weights=` — confirm the pinned
    # torchvision version before upgrading.
    return resnet_fpn_backbone(backbone_name='resnet50', pretrained=True)

def build_anchor_generator():
    """Anchors for an FPN backbone: one scale per level, three aspect ratios."""
    sizes = tuple((s,) for s in (32, 64, 128, 256, 512))
    ratios = tuple((0.5, 1.0, 2.0) for _ in sizes)
    return AnchorGenerator(sizes, ratios)

def build_rpn_head():
    """RPN head over 256-channel FPN maps, 3 anchors per spatial location."""
    in_channels = 256   # FPN output channels
    anchors_per_loc = 3  # one size x three aspect ratios per level
    return RPNHead(in_channels, anchors_per_loc)

def build_box_roi_pool():
    """7x7 multi-scale RoIAlign over the four FPN levels for the box branch."""
    level_names = [str(level) for level in range(4)]
    return MultiScaleRoIAlign(featmap_names=level_names, output_size=7, sampling_ratio=2)

def build_box_head():
    """Two-layer MLP mapping flattened 256x7x7 RoI features to 1024-d."""
    roi_feature_dim = 256 * 7 * 7
    representation_dim = 1024
    return TwoMLPHead(roi_feature_dim, representation_dim)

def build_box_predictor(num_classes):
    """Classification + box-regression heads on the 1024-d box features."""
    representation_dim = 1024  # must match TwoMLPHead output
    return FastRCNNPredictor(representation_dim, num_classes)

def build_mask_roi_pool():
    """14x14 multi-scale RoIAlign over the four FPN levels for the mask branch."""
    level_names = [str(level) for level in range(4)]
    return MultiScaleRoIAlign(featmap_names=level_names, output_size=14, sampling_ratio=2)

def build_mask_head():
    """Four 3x3 conv layers (256 channels each) applied to pooled mask features.

    BUG FIX: ``MaskRCNNHeads(in_channels, layers, dilation)`` takes a single
    int ``dilation`` that is forwarded to every Conv2d; the previous code
    passed the 4-tuple ``(3, 3, 3, 3)``, which Conv2d rejects (it accepts an
    int or a 2-tuple). Standard Mask R-CNN uses dilation 1.
    """
    return MaskRCNNHeads(256, (256, 256, 256, 256), 1)

def build_mask_predictor(num_classes):
    """Deconv upsampling + 1x1 conv producing one mask logit map per class."""
    in_channels = 256   # must match the mask head's last layer
    dim_reduced = 256   # channels after the transposed conv
    return MaskRCNNPredictor(in_channels, dim_reduced, num_classes)

def build_model(num_classes):
    """Assemble a complete Mask R-CNN for `num_classes` (background included)."""
    return MaskRCNN(
        build_backbone(),
        num_classes,
        rpn_anchor_generator=build_anchor_generator(),
        rpn_head=build_rpn_head(),
        box_roi_pool=build_box_roi_pool(),
        box_head=build_box_head(),
        box_predictor=build_box_predictor(num_classes),
        mask_roi_pool=build_mask_roi_pool(),
        mask_head=build_mask_head(),
        mask_predictor=build_mask_predictor(num_classes),
    )

# Example usage: run inference on two random images.
if __name__ == "__main__":
    n_classes = 91  # COCO label space (including background)
    detector = build_model(n_classes)
    detector.eval()  # eval mode: forward() returns detections, not losses
    batch = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
    results = detector(batch)
    print(results)