import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
# Faster R-CNN detection model
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.models import resnet18
from STN import STN, SpatialTransformer
from GTF import GTF

from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.models.resnet import resnet50
from torchvision.models.detection.backbone_utils import (
    _resnet_fpn_extractor,
    _validate_trainable_layers,
)
from torchvision.ops import misc as misc_nn_ops

from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.transform import GeneralizedRCNNTransform


def get_4ch_faster_rcnn(num_classes: int, pretrained: bool = True):
    """Build a Faster R-CNN detector that accepts 4-channel (RGB + IR) input.

    Args:
        num_classes: number of output classes (including background).
        pretrained: if True, initialize the ResNet-50 backbone with ImageNet
            weights; the pretrained RGB filters of ``conv1`` are kept and the
            extra IR input channel is Kaiming-initialized.

    Returns:
        A torchvision ``FasterRCNN`` whose transform normalizes 4-channel
        images (the stock ``fasterrcnn_resnet50_fpn`` is 3-channel only).
    """
    # 1. 4-channel ResNet-50 + FPN backbone.
    resnet = resnet50(pretrained=pretrained)
    old_conv = resnet.conv1
    resnet.conv1 = nn.Conv2d(4, 64, 7, 2, 3, bias=False)
    with torch.no_grad():
        # Reuse the pretrained RGB filters; randomly init the new IR channel.
        resnet.conv1.weight[:, :3] = old_conv.weight
        nn.init.kaiming_normal_(resnet.conv1.weight[:, 3:])

    trainable_backbone_layers = 3
    backbone = _resnet_fpn_extractor(resnet, trainable_backbone_layers)

    # BUG FIX: with trainable_backbone_layers=3 the extractor freezes conv1
    # (only layer2/3/4 stay trainable), so the randomly initialized IR channel
    # of conv1 would never be trained. Re-enable gradients for conv1 only.
    for p in backbone.body.conv1.parameters():
        p.requires_grad_(True)

    # 2. Build FasterRCNN by hand (do NOT use fasterrcnn_resnet50_fpn here).
    anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
    aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
    rpn_anchor_gen = AnchorGenerator(sizes=anchor_sizes, aspect_ratios=aspect_ratios)

    # 4-channel mean/std: RGB uses the official ImageNet stats; the IR channel
    # uses 0.5 / 0.229 as placeholders (compute real stats on your own data).
    image_mean = [0.485, 0.456, 0.406, 0.5]
    image_std = [0.229, 0.224, 0.225, 0.229]

    transform = GeneralizedRCNNTransform(
        min_size=640, max_size=640,
        image_mean=image_mean, image_std=image_std
    )

    model = FasterRCNN(
        backbone=backbone,
        num_classes=num_classes,
        rpn_anchor_generator=rpn_anchor_gen,
        transform=transform,  # key: overrides the default 3-channel normalize
    )
    return model

class DamageDetectionModel(nn.Module):
    """Registers ``image_2`` onto ``image_1`` with an STN and fuses them.

    NOTE(review): the detection head is not implemented yet —
    ``num_damage_types`` is stored but unused, and the second return value
    is ``None`` until a detector is added.
    """

    def __init__(self, num_damage_types):
        super(DamageDetectionModel, self).__init__()
        # Registration module (spatial transformer network).
        self.stn = STN()
        # Kept for the future detection head; currently unused.
        self.num_damage_types = num_damage_types

    def forward(self, image_1, image_2):
        # Warp image_2 toward image_1's viewpoint with the STN.
        image_2_transformed = self.stn(image_2)
        # TODO: fusion should blend the two images smoothly and be
        # performance-aware instead of a plain channel concatenation.
        image_merged = torch.cat((image_1, image_2_transformed), dim=1)
        # BUG FIX: the original returned the undefined name ``outputs``
        # (guaranteed NameError). Return None until a detection head exists.
        return image_merged, None


""" 
    这并非一个可行的方案，仅仅可以单目标检测，完全不符合
"""


class RegistrationModel(nn.Module):
    def __init__(self):
        super().__init__()
        # 1. 特征提取（融合可见光和红外特征）
        self.backbone = resnet18(pretrained=True)
        """ 
            
        """
        self.backbone.conv1 = nn.Conv2d(
            4, 64, kernel_size=7, stride=2, padding=3, bias=False
        )  # 输入：3+1通道

        # 2. 配准模块（将红外图像配准到可见光视角）
        self.stn = SpatialTransformer(in_channels=1)  # 输入红外单通道

        # 3. 边界框预测头（预测配准后目标的边界框）
        self.bbox_head = nn.Sequential(
            nn.Linear(512, 128),  # 接resnet18的avgpool输出(512)
            nn.ReLU(),
            nn.Linear(128, 4),  # 输出归一化的bbox (x1, y1, x2, y2)
        )

    def forward(self, img1, img2):
        """
        img1: 可见光图像 (B, 3, H, W)
        img2: 红外图像 (B, 1, H, W)
        return: merged_img (B, 4, H, W), pred_bbox (B, 4)
        """
        # 步骤1：配准红外图像到可见光视角
        img2_warped = self.stn(img2)  # 红外图像配准

        # 步骤2：合并图像（可见光RGB + 配准后红外）
        merged_img = torch.cat([img1, img2_warped], dim=1)  # (B, 4, H, W)

        # 步骤3：特征融合与边界框预测
        x = self.backbone.conv1(merged_img)
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)
        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)
        x = self.backbone.avgpool(x)
        x = torch.flatten(x, 1)  # (B, 512)
        pred_bbox = self.bbox_head(x)  # (B, 4)，归一化坐标

        return merged_img, pred_bbox


class STN_FasterRCNN(nn.Module):
    """STN alignment + GTF fusion feeding a stock 3-channel Faster R-CNN."""

    def __init__(self, num_classes):
        super().__init__()
        # Spatial transformer that aligns the single-channel IR image.
        self.stn = SpatialTransformer(in_channels=1)
        self.gtf = GTF()
        # 3-channel Faster R-CNN; reuse torchvision's implementation as-is.
        self.detector = torchvision.models.detection.fasterrcnn_resnet50_fpn(
            num_classes=num_classes
        )

    def forward(self, vis, ir, targets=None):
        """
        vis: list[Tensor(3,H,W)]  normalized RGB
        ir : list[Tensor(1,H,W)]  normalized IR
        """
        # 1. Align each IR frame with the STN (batch of 1 per frame).
        aligned = []
        for frame in ir:
            warped = self.stn(frame.unsqueeze(0))[0].squeeze(0)
            aligned.append(warped)

        # 2. GTF-fuse every (visible, aligned-IR) pair into a 3-channel image.
        fused = []
        for rgb, warped in zip(vis, aligned):
            merged = self.gtf(rgb.unsqueeze(0), warped.unsqueeze(0))
            fused.append(merged.squeeze(0))

        # 3. Delegate to the detector: losses in training, boxes in eval.
        if self.training:
            return self.detector(fused, targets)
        return self.detector(fused)
