import torch
import torch.nn as nn

import torch.nn.functional as F

from torchvision.models import resnet50, ResNet50_Weights
class BiFPN(nn.Module):
    """Single EfficientDet-style bidirectional feature pyramid layer.

    Accepts five feature maps (P3..P7, highest to lowest resolution) whose
    channel counts are given by ``fpn_sizes`` and fuses them along a
    top-down and a bottom-up pathway.  Every output level has ``W_bifpn``
    (= 64) channels.

    Fusion uses the paper's "fast normalized fusion": each learnable scalar
    weight is passed through ReLU before normalization so it stays
    non-negative.  This keeps the denominator strictly positive and prevents
    the NaN/blow-up that unconstrained weights can cause if one drifts
    negative during training.  All weights are initialized to 1.0, so the
    ReLU is the identity at initialization (no behavior change at init).
    """

    def __init__(self, fpn_sizes):
        """
        Args:
            fpn_sizes: list of 5 ints — input channel counts for
                P3, P4, P5, P6, P7, in that order.
        """
        super(BiFPN, self).__init__()
        P3_channels, P4_channels, P5_channels, P6_channels, P7_channels = fpn_sizes
        self.W_bifpn = 64  # number of output channels at every BiFPN level

        # -------- Top-Down Pathway --------
        # P6_td branch: fuse P6 with the upsampled P7.
        # *_conv projects the raw input to W_bifpn channels; *_conv_2 is a
        # depthwise 3x3 (groups == channels) applied after weighted fusion.
        self.p6_td_conv = nn.Conv2d(P6_channels, self.W_bifpn, kernel_size=3, stride=1, padding=1)
        self.p6_td_conv_2 = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1,
                                      groups=self.W_bifpn, padding=1)
        self.p6_td_act = nn.ReLU()
        self.p6_td_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p6_td_w1 = nn.Parameter(torch.tensor(1.0))
        self.p6_td_w2 = nn.Parameter(torch.tensor(1.0))

        # P5_td branch: fuse P5 with the upsampled P6_td.
        self.p5_td_conv = nn.Conv2d(P5_channels, self.W_bifpn, kernel_size=3, stride=1, padding=1)
        self.p5_td_conv_2 = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1,
                                      groups=self.W_bifpn, padding=1)
        self.p5_td_act = nn.ReLU()
        self.p5_td_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p5_td_w1 = nn.Parameter(torch.tensor(1.0))
        self.p5_td_w2 = nn.Parameter(torch.tensor(1.0))

        # P4_td branch: fuse P4 with the upsampled P5_td.
        self.p4_td_conv = nn.Conv2d(P4_channels, self.W_bifpn, kernel_size=3, stride=1, padding=1)
        self.p4_td_conv_2 = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1,
                                      groups=self.W_bifpn, padding=1)
        self.p4_td_act = nn.ReLU()
        self.p4_td_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p4_td_w1 = nn.Parameter(torch.tensor(1.0))
        self.p4_td_w2 = nn.Parameter(torch.tensor(1.0))

        # P3_out branch: fuse P3 with the upsampled P4_td (P3 has no
        # bottom-up step, so this is directly the P3 output).
        self.p3_out_conv = nn.Conv2d(P3_channels, self.W_bifpn, kernel_size=3, stride=1, padding=1)
        self.p3_out_conv_2 = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1,
                                       groups=self.W_bifpn, padding=1)
        self.p3_out_act = nn.ReLU()
        self.p3_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p3_out_w1 = nn.Parameter(torch.tensor(1.0))
        self.p3_out_w2 = nn.Parameter(torch.tensor(1.0))

        # -------- Bottom-Up Pathway --------
        # P7 branch: p7_out_conv projects the raw P7 input (used at the top
        # of the top-down pass); p7_out_conv_2 produces the final P7 output.
        self.p7_out_conv = nn.Conv2d(P7_channels, self.W_bifpn, kernel_size=3, stride=1, padding=1)
        self.p7_out_conv_2 = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1,
                                       groups=self.W_bifpn, padding=1)
        self.p7_out_act = nn.ReLU()
        self.p7_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p7_out_w1 = nn.Parameter(torch.tensor(1.0))
        self.p7_out_w2 = nn.Parameter(torch.tensor(1.0))

        # P4_out branch: fuses P4 projection, P4_td, and downsampled P3_out.
        self.p4_out_conv = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, padding=1)
        self.p4_out_act = nn.ReLU()
        self.p4_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p4_out_w1 = nn.Parameter(torch.tensor(1.0))
        self.p4_out_w2 = nn.Parameter(torch.tensor(1.0))
        self.p4_out_w3 = nn.Parameter(torch.tensor(1.0))

        # P5_out branch.
        self.p5_out_conv = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, padding=1)
        self.p5_out_act = nn.ReLU()
        self.p5_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p5_out_w1 = nn.Parameter(torch.tensor(1.0))
        self.p5_out_w2 = nn.Parameter(torch.tensor(1.0))
        self.p5_out_w3 = nn.Parameter(torch.tensor(1.0))

        # P6_out branch.
        self.p6_out_conv = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, padding=1)
        self.p6_out_act = nn.ReLU()
        self.p6_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p6_out_w1 = nn.Parameter(torch.tensor(1.0))
        self.p6_out_w2 = nn.Parameter(torch.tensor(1.0))
        self.p6_out_w3 = nn.Parameter(torch.tensor(1.0))

        # Downsampling modules for the bottom-up pathway.
        self.p3_downsample = nn.MaxPool2d(kernel_size=2)
        self.p4_downsample = nn.MaxPool2d(kernel_size=2)
        self.p5_downsample = nn.MaxPool2d(kernel_size=2)

    @staticmethod
    def _fuse(weights, feats, epsilon=1e-4):
        """Fast normalized fusion: sum(relu(w_i) * f_i) / (sum(relu(w_i)) + eps).

        The ReLU clamps each learnable weight to be non-negative, so the
        normalizing denominator can never approach zero or flip sign.
        """
        pos = [F.relu(w) for w in weights]
        denom = sum(pos) + epsilon
        num = sum(w * f for w, f in zip(pos, feats))
        return num / denom

    def forward(self, inputs):
        """Fuse the five input levels and return [P3_out..P7_out].

        Args:
            inputs: iterable of five tensors (P3, P4, P5, P6, P7), each
                [B, C_i, H_i, W_i] with channels matching ``fpn_sizes``.

        Returns:
            List of five tensors with ``W_bifpn`` channels each.  P3..P6
            outputs keep their input spatial sizes; P7_out is emitted at
            P6's spatial size (see note below).
        """
        P3, P4, P5, P6, P7 = inputs

        # -------- Top-Down Pathway --------
        # Project P7, then upsample it to P6's spatial size.
        P7_td = self.p7_out_conv(P7)  # [B, 64, H_P7, W_P7]
        P7_td_up = F.interpolate(P7_td, size=P6.shape[2:], mode='nearest')

        P6_td_inp = self.p6_td_conv(P6)  # [B, 64, H_P6, W_P6]
        P6_td = self.p6_td_conv_2(
            self._fuse([self.p6_td_w1, self.p6_td_w2], [P6_td_inp, P7_td_up])
        )
        P6_td = self.p6_td_act(P6_td)
        P6_td = self.p6_td_conv_bn(P6_td)

        # Upsample P6_td to P5's size and fuse.
        P5_td_inp = self.p5_td_conv(P5)  # [B, 64, H_P5, W_P5]
        P6_td_up = F.interpolate(P6_td, size=P5.shape[-2:], mode='nearest')
        P5_td = self.p5_td_conv_2(
            self._fuse([self.p5_td_w1, self.p5_td_w2], [P5_td_inp, P6_td_up])
        )
        P5_td = self.p5_td_act(P5_td)
        P5_td = self.p5_td_conv_bn(P5_td)

        # Upsample P5_td to P4's size and fuse.
        P4_td_inp = self.p4_td_conv(P4)  # [B, 64, H_P4, W_P4]
        P5_td_up = F.interpolate(P5_td, size=P4.shape[-2:], mode='nearest')
        P4_td = self.p4_td_conv_2(
            self._fuse([self.p4_td_w1, self.p4_td_w2], [P4_td_inp, P5_td_up])
        )
        P4_td = self.p4_td_act(P4_td)
        P4_td = self.p4_td_conv_bn(P4_td)

        # Upsample P4_td to P3's size and fuse; this is the final P3 output.
        P3_td = self.p3_out_conv(P3)  # [B, 64, H_P3, W_P3]
        P4_td_up = F.interpolate(P4_td, size=P3.shape[-2:], mode='nearest')
        P3_out = self.p3_out_conv_2(
            self._fuse([self.p3_out_w1, self.p3_out_w2], [P3_td, P4_td_up])
        )
        P3_out = self.p3_out_act(P3_out)
        P3_out = self.p3_out_conv_bn(P3_out)

        # -------- Bottom-Up Pathway --------
        P4_out = self.p4_out_conv(
            self._fuse([self.p4_out_w1, self.p4_out_w2, self.p4_out_w3],
                       [P4_td_inp, P4_td, self.p3_downsample(P3_out)])
        )
        P4_out = self.p4_out_act(P4_out)
        P4_out = self.p4_out_conv_bn(P4_out)

        P5_out = self.p5_out_conv(
            self._fuse([self.p5_out_w1, self.p5_out_w2, self.p5_out_w3],
                       [P5_td_inp, P5_td, self.p4_downsample(P4_out)])
        )
        P5_out = self.p5_out_act(P5_out)
        P5_out = self.p5_out_conv_bn(P5_out)

        P6_out = self.p6_out_conv(
            self._fuse([self.p6_out_w1, self.p6_out_w2, self.p6_out_w3],
                       [P6_td_inp, P6_td, self.p5_downsample(P5_out)])
        )
        P6_out = self.p6_out_act(P6_out)
        P6_out = self.p6_out_conv_bn(P6_out)

        # NOTE(review): here P7_td is upsampled to P6_out's size instead of
        # P6_out being downsampled to P7's size, so P7_out comes back at
        # P6's resolution rather than P7's.  Preserved as-is because the
        # downstream model discards P7_out — confirm before relying on P7.
        P7_td = F.interpolate(P7_td, size=P6_out.shape[-2:], mode='nearest')

        P7_out = self.p7_out_conv_2(
            self._fuse([self.p7_out_w1, self.p7_out_w2], [P7_td, P6_out])
        )
        P7_out = self.p7_out_act(P7_out)
        P7_out = self.p7_out_conv_bn(P7_out)

        # Return the fused multi-scale features (5 levels; downstream tasks
        # may use any subset).
        return [P3_out, P4_out, P5_out, P6_out, P7_out]


class MultiLabelBiFPNResNet(nn.Module):
    """Two-stream (left/right image) multi-label classifier.

    A ResNet-50 backbone (frozen except for layer4) feeds a BiFPN; the four
    highest-resolution BiFPN outputs (64 channels each) are upsampled to a
    common size and concatenated into a 256-channel map per stream.  Each
    stream is globally pooled to a 256-d vector, the two vectors are
    concatenated (512-d), and a two-layer head produces the logits.
    """

    def __init__(self, num_classes=8):
        super(MultiLabelBiFPNResNet, self).__init__()

        # ResNet-50 backbone; weights=None means no pretrained download.
        self.backbone = resnet50(weights=None)

        # Freeze the whole backbone, then re-enable gradients for layer4
        # only (adjust which layers train as needed).
        for param in self.backbone.parameters():
            param.requires_grad = False
        for param in self.backbone.layer4.parameters():
            param.requires_grad = True

        # Extra pooling to derive the lower-resolution P7 level from layer4.
        self.extra_pool = nn.MaxPool2d(kernel_size=2, stride=2)

        # 1x1 convs mapping ResNet stage channels to the BiFPN input widths.
        # ResNet stage outputs: layer1:256, layer2:512, layer3:1024, layer4:2048.
        self.reduce_c3 = nn.Conv2d(256, 40, kernel_size=1)
        self.reduce_c4 = nn.Conv2d(512, 112, kernel_size=1)
        self.reduce_c5 = nn.Conv2d(1024, 192, kernel_size=1)
        self.reduce_c6 = nn.Conv2d(2048, 192, kernel_size=1)
        self.reduce_c7 = nn.Conv2d(2048, 1280, kernel_size=1)

        # BiFPN module over the five reduced levels.
        self.bifpn = BiFPN([40, 112, 192, 192, 1280])

        # Classification head: each stream pools to 4*64 = 256 features;
        # left/right concatenation gives 512, matching fc1's input width.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc1 = nn.Linear(512, 256)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(256, num_classes)

    def forward_once(self, x):
        """Extract the fused multi-scale feature map for one image batch."""
        # ResNet stem.
        stem = self.backbone.maxpool(
            self.backbone.relu(self.backbone.bn1(self.backbone.conv1(x))))

        # Backbone stages.
        stage1 = self.backbone.layer1(stem)    # [B, 256,  H/4,  W/4]
        stage2 = self.backbone.layer2(stage1)  # [B, 512,  H/8,  W/8]
        stage3 = self.backbone.layer3(stage2)  # [B, 1024, H/16, W/16]
        stage4 = self.backbone.layer4(stage3)  # [B, 2048, H/32, W/32]
        extra = self.extra_pool(stage4)        # [B, 2048, H/64, W/64]

        # Channel reduction to the BiFPN's expected widths.
        pyramid = [
            self.reduce_c3(stage1),  # [B, 40,   H/4,  W/4]
            self.reduce_c4(stage2),  # [B, 112,  H/8,  W/8]
            self.reduce_c5(stage3),  # [B, 192,  H/16, W/16]
            self.reduce_c6(stage4),  # [B, 192,  H/32, W/32]
            self.reduce_c7(extra),   # [B, 1280, H/64, W/64]
        ]

        # BiFPN returns 5 fused scales; the coarsest (P7) is not used here.
        p3, p4, p5, p6, _ = self.bifpn(pyramid)

        # Upsample P4-P6 to P3's spatial size and concatenate along channels.
        target = p3.shape[-2:]
        aligned = [p3] + [F.interpolate(level, size=target, mode='nearest')
                          for level in (p4, p5, p6)]
        return torch.cat(aligned, dim=1)  # [B, 64*4=256, H/4, W/4]

    def forward(self, left_images, right_images):
        """Classify a (left, right) image pair; returns [B, num_classes] logits."""
        left_feat = self.forward_once(left_images)
        right_feat = self.forward_once(right_images)

        # Global average pool each stream and flatten to [B, 256].
        left_vec = self.avgpool(left_feat).view(left_feat.size(0), -1)
        right_vec = self.avgpool(right_feat).view(right_feat.size(0), -1)

        # Concatenate the two streams: [B, 512].
        combined = torch.cat([left_vec, right_vec], dim=1)

        # Two-layer head with ReLU and dropout in between.
        return self.fc2(self.dropout(F.relu(self.fc1(combined))))
