import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import vgg16


# RPN网络
class RPN(nn.Module):
    """Region Proposal Network head.

    Predicts per-anchor objectness scores and bounding-box offsets from a
    backbone feature map, and generates the matching anchor grid.
    """

    def __init__(self, in_channels=512, mid_channels=512, ratios=[0.5, 1, 2], anchor_scales=[8, 16, 32]):
        """
        :param in_channels: channel count of the backbone feature map
        :param mid_channels: channel count of the intermediate conv features
        :param ratios: anchor aspect ratios
        :param anchor_scales: anchor sizes
        """
        super(RPN, self).__init__()

        # Two 3x3 convolutions refine the backbone features before prediction.
        self.conv1 = nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(mid_channels, mid_channels, kernel_size=3, stride=1, padding=1)

        num_anchors = len(ratios) * len(anchor_scales)
        # 1x1 conv producing 2 scores (foreground/background) per anchor per location.
        self.cls_layer = nn.Conv2d(mid_channels, num_anchors * 2, kernel_size=1, stride=1)
        # 1x1 conv producing 4 box-regression offsets per anchor per location.
        self.reg_layer = nn.Conv2d(mid_channels, num_anchors * 4, kernel_size=1, stride=1)

        # Anchor shape configuration consumed by generate_anchors().
        self.ratios = ratios
        self.anchor_scales = anchor_scales

    def forward(self, x, img_size, feat_stride):
        """Run the RPN head.

        :param x: backbone feature map, shape (batch, in_channels, H, W)
        :param img_size: input image size (currently unused; kept for API parity)
        :param feat_stride: stride from the input image to the feature map
        :return: (rpn_cls, rpn_reg, anchors) where
                 rpn_cls is (batch, H*W*num_anchors, 2),
                 rpn_reg is (batch, H*W*num_anchors, 4),
                 anchors is (H*W*num_anchors, 4) rows of [xmin, ymin, xmax, ymax]
        """
        batch_size, _, feat_height, feat_width = x.shape

        # Shared intermediate features for both prediction branches.
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))

        # Per-location foreground/background scores and box offsets.
        rpn_cls = self.cls_layer(x)
        rpn_reg = self.reg_layer(x)

        # (B, A*2, H, W) -> (B, H*W*A, 2); channel-last first so all anchors
        # of one spatial location stay contiguous after flattening.
        rpn_cls = rpn_cls.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 2)
        # Same reshape for the regression branch: (B, H*W*A, 4).
        rpn_reg = rpn_reg.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 4)

        # Anchor coordinates matching the prediction layout.
        anchors = self.generate_anchors(feat_height, feat_width, feat_stride)

        return rpn_cls, rpn_reg, anchors

    def generate_anchors(self, feat_height, feat_width, feat_stride, scales=None, ratios=None):
        """Generate the full anchor grid for a feature map.

        :param feat_height: feature-map height
        :param feat_width: feature-map width
        :param feat_stride: stride from the input image to the feature map
        :param scales: anchor sizes; defaults to self.anchor_scales
        :param ratios: anchor aspect ratios; defaults to self.ratios
        :return: tensor of shape (feat_height * feat_width * len(scales) * len(ratios), 4),
                 each row [xmin, ymin, xmax, ymax]
        """
        # BUGFIX: the defaults used to be hardcoded copies of the constructor
        # defaults, so custom ratios/anchor_scales given to __init__ were
        # silently ignored when forward() called this method.
        if scales is None:
            scales = self.anchor_scales
        if ratios is None:
            ratios = self.ratios

        # Precompute each base anchor's (w, h) once — loop-invariant over the grid.
        # Order (scale outer, ratio inner) matches the prediction channel layout.
        base_sizes = []
        for scale in scales:
            for ratio in ratios:
                h = scale * (ratio ** 0.5)
                w = scale / (ratio ** 0.5)
                base_sizes.append((w, h))

        anchors = []
        for y in range(feat_height):
            for x in range(feat_width):
                # Anchor center in input-image coordinates.
                center_x = x * feat_stride
                center_y = y * feat_stride

                for w, h in base_sizes:
                    anchors.append([center_x - w / 2, center_y - h / 2,
                                    center_x + w / 2, center_y + h / 2])

        return torch.tensor(anchors, dtype=torch.float32)


# RoI池化层
class RoIPool(nn.Module):
    """RoI pooling implemented with adaptive max pooling.

    Crops each region of interest out of the feature map and pools the crop
    to a fixed spatial size.
    """

    def __init__(self, output_size=(7, 7)):
        """
        :param output_size: (height, width) of every pooled output
        """
        super(RoIPool, self).__init__()
        self.output_size = output_size
        self.pooling = nn.AdaptiveMaxPool2d(output_size)

    def forward(self, feature_map, rois):
        """Pool every RoI to self.output_size.

        :param feature_map: (batch, C, H, W) feature tensor
        :param rois: (N, 4) boxes in feature-map coordinates, each row
                     [xmin, ymin, xmax, ymax] — the layout produced by the
                     RPN anchors / apply_reg in this file
        :return: (N, batch, C, *output_size) pooled features
        """
        # Truncate coordinates to integer feature-map indices.
        rois = rois.to(torch.int64)

        _, _, h, w = feature_map.shape

        roi_feats = []
        for roi in rois:
            # BUGFIX: rois are [xmin, ymin, xmax, ymax]; the previous code
            # unpacked them as [y1, x1, y2, x2], transposing every box.
            x1, y1, x2, y2 = roi

            # Clamp to valid pixel indices (0 .. dim-1) and force x2 >= x1,
            # y2 >= y1 so the slice below is never empty — an empty slice
            # (box fully outside the map) would crash AdaptiveMaxPool2d.
            x1 = torch.clamp(x1, min=0, max=w - 1)
            y1 = torch.clamp(y1, min=0, max=h - 1)
            x2 = torch.clamp(x2, min=0, max=w - 1)
            y2 = torch.clamp(y2, min=0, max=h - 1)
            x2 = torch.maximum(x2, x1)
            y2 = torch.maximum(y2, y1)

            # Inclusive box -> half-open slice, hence the +1.
            roi_slice = feature_map[:, :, y1:y2 + 1, x1:x2 + 1]
            roi_feats.append(self.pooling(roi_slice))

        # Stack along a new leading RoI dimension.
        roi_feats = torch.stack(roi_feats)

        return roi_feats


# Faster R-CNN模型
class FasterRCNN(nn.Module):
    """Minimal Faster R-CNN: VGG16 backbone + RPN + RoI pooling + detection heads.

    NOTE(review): proposals are used without score filtering or NMS, so every
    anchor becomes an RoI — fine for a demo, far too slow for real use.
    """

    # VGG16's conv feature map (before the last max-pool, which is dropped
    # below) is downsampled 16x relative to the input image.
    FEAT_STRIDE = 16

    def __init__(self, num_classes):
        """
        :param num_classes: number of classes including background
        """
        super(FasterRCNN, self).__init__()
        # All VGG16 conv layers except the final max-pool.
        self.extractor = vgg16().features[:-1]
        self.rpn = RPN()
        self.roi_pool = RoIPool()
        self.cls_layer = nn.Linear(512 * 7 * 7, num_classes)
        # The background class gets no box regression, hence num_classes - 1.
        self.reg_layer = nn.Linear(512 * 7 * 7, (num_classes - 1) * 4)

    def forward(self, x, scale=1.):
        """Run detection on a batch of images.

        :param x: input images, shape (batch, 3, H, W); assumes batch == 1
                  (apply_reg flattens the batch into the anchor dimension)
        :param scale: kept for interface compatibility; currently unused
        :return: (cls_out, reg_out, rois)
        """
        img_size = x.shape[2:]
        features = self.extractor(x)
        # BUGFIX: `scale` (default 1.0) used to be passed as the feature
        # stride, collapsing all anchor centers near the image origin; the
        # actual VGG16 feature stride is 16.
        rpn_cls, rpn_reg, anchors = self.rpn(features, img_size, self.FEAT_STRIDE)
        # Apply the predicted offsets to get proposals in image coordinates.
        rois = self.apply_reg(anchors, rpn_reg)
        # RoIs are in image coordinates; divide by the stride to index the
        # feature map.
        roi_feats = self.roi_pool(features, rois / self.FEAT_STRIDE).view(rois.size(0), -1)
        cls_out = self.cls_layer(roi_feats)
        reg_out = self.reg_layer(roi_feats)
        return cls_out, reg_out, rois

    def apply_reg(self, anchors, rpn_reg):
        """Apply RPN box regression offsets to the anchors.

        :param anchors: (num_anchors, 4) anchor boxes [xmin, ymin, xmax, ymax]
        :param rpn_reg: (batch, num_anchors, 4) predicted offsets
                        (dx, dy, dw, dh); assumes batch == 1
        :return: (num_anchors, 4) adjusted boxes [xmin, ymin, xmax, ymax]
        """
        # BUGFIX: anchors are created on the CPU; move them to the device of
        # the predictions so GPU inference does not crash on a device mismatch.
        anchors = anchors.to(rpn_reg.device)

        # Convert corner representation to center + size.
        anchor_centers = (anchors[:, :2] + anchors[:, 2:]) / 2
        anchor_sizes = anchors[:, 2:] - anchors[:, :2]

        # Offsets: centers shift proportionally to the anchor size; sizes are
        # scaled by exp(dw), exp(dh), keeping them strictly positive.
        rpn_reg = rpn_reg.view(-1, 4)
        centers = anchor_centers + rpn_reg[:, :2] * anchor_sizes
        sizes = anchor_sizes * torch.exp(rpn_reg[:, 2:])

        # Back to corner representation.
        rois = torch.cat([centers - sizes / 2, centers + sizes / 2], dim=1)

        return rois


def test_with_random_input():
    """Smoke-test FasterRCNN on a single random 800x800 image."""
    # Prefer the GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # 20 object classes plus one background class.
    num_classes = 20 + 1
    model = FasterRCNN(num_classes=num_classes).to(device)

    # A random image stands in for real data.
    dummy_image = torch.randn(1, 3, 800, 800).to(device)

    # Inference only — gradients are not needed.
    with torch.no_grad():
        cls_out, reg_out, rois = model(dummy_image)

    # Report the output shapes.
    print("分类输出维度:", cls_out.shape)  # expected (N, num_classes)
    print("回归输出维度:", reg_out.shape)  # expected (N, (num_classes-1) * 4)
    print("生成的RoIs数量:", rois.shape)  # expected (N, 4), N = number of RoIs


if __name__ == '__main__':
    # Run the random-input smoke test when executed as a script.
    test_with_random_input()
