import torch
import torch.nn as nn
import torchvision
from torchvision.models.detection import MaskRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from torchvision.models.detection.transform import GeneralizedRCNNTransform

class SimpleMaskRCNN(nn.Module):
    """Minimal Mask R-CNN assembled from torchvision components.

    ResNet-50 + FPN backbone, a RegionProposalNetwork, and RoIHeads with
    box and mask branches, wired together the same way as torchvision's
    ``GeneralizedRCNN``.

    Args:
        num_classes: number of output classes, including background.
    """

    def __init__(self, num_classes):
        super().__init__()
        # Backbone: ResNet-50 + FPN (every pyramid level outputs 256 channels).
        self.backbone = resnet_fpn_backbone(
            backbone_name='resnet50',
            pretrained=True,
            trainable_layers=3
        )

        # The FPN emits 5 feature maps ('0'..'3' plus 'pool'); AnchorGenerator
        # requires one (sizes, aspect_ratios) entry PER feature map.  Packing
        # all five sizes into a single level (the previous config) does not
        # match the 5 FPN levels and fails at runtime.
        anchor_generator = torchvision.models.detection.anchor_utils.AnchorGenerator(
            sizes=((32,), (64,), (128,), (256,), (512,)),
            aspect_ratios=((0.5, 1.0, 2.0),) * 5
        )

        self.rpn = torchvision.models.detection.rpn.RegionProposalNetwork(
            anchor_generator=anchor_generator,
            head=torchvision.models.detection.rpn.RPNHead(
                in_channels=256,  # FPN output channels
                # Derive from the generator (3 per location) instead of
                # hard-coding, so head and generator stay in sync.
                num_anchors=anchor_generator.num_anchors_per_location()[0]
            ),
            fg_iou_thresh=0.7,
            bg_iou_thresh=0.3,
            batch_size_per_image=256,
            positive_fraction=0.5,
            pre_nms_top_n=dict(training=2000, testing=1000),
            post_nms_top_n=dict(training=2000, testing=1000),
            nms_thresh=0.7
        )

        self.roi_heads = torchvision.models.detection.roi_heads.RoIHeads(
            # Box branch.
            box_roi_pool=torchvision.ops.MultiScaleRoIAlign(
                featmap_names=['0', '1', '2', '3'],
                output_size=7,
                sampling_ratio=2),
            box_head=torchvision.models.detection.faster_rcnn.TwoMLPHead(
                in_channels=256 * 7 ** 2,  # channels * pooled H * pooled W
                representation_size=1024),
            box_predictor=torchvision.models.detection.faster_rcnn.FastRCNNPredictor(
                1024, num_classes),

            # Proposal matching / sampling during training.
            fg_iou_thresh=0.5,
            bg_iou_thresh=0.5,
            batch_size_per_image=512,
            positive_fraction=0.25,
            bbox_reg_weights=None,  # fall back to torchvision defaults

            # Inference-time filtering.
            score_thresh=0.05,
            nms_thresh=0.5,
            detections_per_img=100,

            # Mask branch.
            mask_roi_pool=torchvision.ops.MultiScaleRoIAlign(
                featmap_names=['0', '1', '2', '3'],
                output_size=14,
                sampling_ratio=2),
            mask_head=torchvision.models.detection.mask_rcnn.MaskRCNNHeads(
                in_channels=256,
                layers=(256, 256, 256, 256),
                dilation=1),
            mask_predictor=torchvision.models.detection.mask_rcnn.MaskRCNNPredictor(
                256, 256, num_classes)
        )

        # Resize + normalize on the way in; rescale boxes/masks back to the
        # original resolution on the way out (eval mode only).
        self.transform = GeneralizedRCNNTransform(
            min_size=720, max_size=1333,
            image_mean=[0.485, 0.456, 0.406],
            image_std=[0.229, 0.224, 0.225]
        )

    def forward(self, images, targets=None):
        """Run the detector.

        Args:
            images: a single CHW tensor or a list of CHW tensors.
            targets: optional list of per-image target dicts (required in
                training mode).

        Returns:
            Training mode: a dict merging RPN and RoI-head losses.
            Eval mode: a list of per-image detection dicts ('boxes',
            'labels', 'scores', 'masks') in original image coordinates.
        """
        # Accept a single image for convenience (matches torchvision API).
        if isinstance(images, torch.Tensor):
            images = [images]

        # Record the ORIGINAL sizes BEFORE the transform resizes the images;
        # postprocess needs them to map detections back to input resolution.
        # Reading images.image_sizes after the transform (as before) yields
        # the resized sizes, making the final rescaling a silent no-op.
        original_image_sizes = [img.shape[-2:] for img in images]

        # Preprocess: resize + normalize, batch into an ImageList.
        images, targets = self.transform(images, targets)

        features = self.backbone(images.tensors)

        # RegionProposalNetwork.forward signature is
        # (images: ImageList, features: Dict[str, Tensor], targets);
        # the previous call passed image_sizes where the ImageList belongs.
        proposals, proposal_losses = self.rpn(images, features, targets)

        detections, detector_losses = self.roi_heads(
            features,
            proposals,
            images.image_sizes,
            targets
        )

        # Map boxes/masks from the resized frame back to original image
        # coordinates (postprocess is a no-op while training, matching
        # torchvision's GeneralizedRCNN behaviour).
        detections = self.transform.postprocess(
            detections,
            images.image_sizes,
            original_image_sizes
        )

        # Training mode returns the merged loss dict from both stages.
        if self.training:
            return {**proposal_losses, **detector_losses}

        return detections