import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet50

from Child_Adenoid.models.DIANet import DeformableInteractiveAttention


class RFB(nn.Module):
    """Receptive Field Block.

    Three asymmetric-convolution branches with growing kernel sizes and
    dilations are fused with a 1x1 shortcut branch. Spatial size is
    preserved: (B, in_channels, H, W) -> (B, out_channels, H, W).
    """

    def __init__(self, in_channels, out_channels):
        super(RFB, self).__init__()
        self.out_channels = out_channels

        # Shared 1x1 entry projection feeding the three dilated branches.
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)

        # Branches share one recipe: (1xk) -> (kx1) -> 3x3 dilated conv,
        # each followed by BN + ReLU. k and dilation grow per branch.
        self.branch1 = self._make_branch(out_channels, 3, 3)
        self.branch2 = self._make_branch(out_channels, 5, 5)
        self.branch3 = self._make_branch(out_channels, 7, 7)

        # Shortcut branch: plain 1x1 projection of the raw input.
        self.branch4_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

        # Fuse the concatenated branch outputs back to out_channels.
        self.fusion_conv = nn.Sequential(
            nn.Conv2d(4 * out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
        )

    @staticmethod
    def _make_branch(channels, k, dilation):
        """Build one (1xk, kx1, dilated 3x3) branch; padding keeps H and W."""
        pad = k // 2
        return nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=(1, k), padding=(0, pad)),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=(k, 1), padding=(pad, 0)),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=3, dilation=dilation, padding=dilation),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        # Shortcut path on the raw input.
        shortcut = self.branch4_conv(x)

        # Shared entry projection for the dilated branches.
        feat = self.relu(self.conv1(x))

        # Concatenate all four paths (order matters for fusion weights).
        stacked = torch.cat(
            [self.branch1(feat), self.branch2(feat), self.branch3(feat), shortcut],
            dim=1,
        )

        # Fuse, add the shortcut residually, and activate.
        fused = self.fusion_conv(stacked)
        return self.relu(fused + shortcut)

# ---------- 2. MCRAM (unchanged) ---------- #
class MCRAM(nn.Module):
    """Reverse-attention refinement module.

    Reweights an encoder feature map `h` (in_channels, any spatial size)
    by the complement of a 2-channel score map `s` at the same spatial
    size, and predicts a refined 2-channel score map.
    """

    def __init__(self, in_channels):
        super(MCRAM, self).__init__()
        # Refines the reverse-weighted 512-channel concatenation to 256.
        self.conv1 = nn.Sequential(
            nn.Conv2d(512, 256, 3,  padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )

        # Projects the encoder feature to a fixed 256-channel embedding.
        self.adjust = nn.Sequential(
            nn.Conv2d(in_channels,512,1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512,256,1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        )

        # Prediction head: fused 512 channels -> 2-channel score map.
        self.conv2 = nn.Sequential(
            nn.Conv2d(512 ,64, 3, 1, 1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64 , 2, 1)
        )

    def forward(self, h, s):
        # Complement of the per-class softmax confidence: attend to
        # the regions the current score map is least certain about.
        probs = F.softmax(s, dim=1)
        rev_first = 1 - probs[:, 0:1]
        rev_second = 1 - probs[:, 1:2]

        h = self.adjust(h)

        # Reverse-weighted features per class, refined jointly.
        refined = self.conv1(torch.cat([h * rev_first, h * rev_second], dim=1))

        # Combine the embedding with the refinement and predict scores.
        return self.conv2(torch.cat([h, refined], dim=1))

# ---------- 3. PartialDecoder (ResNet50 channels) ---------- #
# class PartialDecoder(nn.Module):
#     def __init__(self, in_channels):
#         super().__init__()
#         self.rfb2 = RFB(in_channels[0], 128)  #f2 512->256
#         self.rfb3 = RFB(in_channels[1], 256)  # f3 1024→256
#         self.rfb4 = RFB(in_channels[2], 512)  # f4 2048→512
#
#         self.conv1 = nn.Sequential(
#             nn.Conv2d(768, 256, 3, 1, 1),
#             nn.BatchNorm2d(256),
#             nn.ReLU(inplace=True),
#             DeformableInteractiveAttention(stride=1,distortionmode=True)
#         )
#         self.conv2 = nn.Sequential(
#             nn.Conv2d(384, 256, 3, 1, 1),
#             nn.BatchNorm2d(256),
#             nn.ReLU(inplace=True),
#             DeformableInteractiveAttention(stride=1,distortionmode=True)
#         )
#         self.up2x = nn.Upsample(scale_factor=2, mode='bilinear',align_corners=True)
#
#     def forward(self, f2, f3, f4):
#         f2 = self.rfb2(f2)
#         f3 = self.rfb3(f3)
#         f4 = self.rfb4(f4)
#
#         f4 = self.up2x(f4)
#         f3_f4 = self.conv1(torch.cat([f3, f4], dim=1))
#         f3_f4 = self.up2x(f3_f4)
#         return self.conv2(torch.cat([f3_f4,f2], dim=1))

class PartialDecoder(nn.Module):
    """Top-down decoder over the three deepest backbone features.

    `in_channels` lists the channel counts of [f2, f3, f4]
    (e.g. ResNet50's [512, 1024, 2048]); output is a 256-channel map at
    f2's spatial resolution.
    """

    def __init__(self, in_channels):
        super(PartialDecoder, self).__init__()
        self.in_channels = in_channels
        self.embed_dim = 256
        dim = self.embed_dim  # local alias

        # Multi-receptive-field extraction per input level.
        self.rfb_f2 = RFB(in_channels[0], 256)
        self.rfb_f3 = RFB(in_channels[1], 256)
        self.rfb_f4 = RFB(in_channels[2], 512)

        # Attention heads over the concatenated pair at each fusion stage.
        self.attention_f3_f4 = nn.Sequential(
            nn.Conv2d(dim * 2, dim, 1),
            nn.BatchNorm2d(dim),
            nn.ReLU(inplace=True),
            DeformableInteractiveAttention(stride=1, distortionmode=True),
        )
        self.attention_f2_f3f4 = nn.Sequential(
            nn.Conv2d(dim * 2, dim, 1),
            nn.BatchNorm2d(dim),
            nn.ReLU(inplace=True),
            DeformableInteractiveAttention(stride=1, distortionmode=True),
        )

        # Learned 2x upsampling: 1x1 channel reduce + transposed conv.
        self.upconv_f4 = nn.Sequential(
            nn.Conv2d(dim * 2, dim, 1),
            nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
        )
        self.upconv_f3 = nn.Sequential(
            nn.Conv2d(dim, dim, 1),
            nn.ConvTranspose2d(dim, dim, kernel_size=2, stride=2),
        )

        # 3x3 refinement after each upsampling step.
        self.refine_f4 = nn.Sequential(
            nn.Conv2d(dim, dim, kernel_size=3, padding=1),
            nn.BatchNorm2d(dim),
            nn.ReLU(inplace=True),
        )
        self.refine_f3 = nn.Sequential(
            nn.Conv2d(dim, dim, kernel_size=3, padding=1),
            nn.BatchNorm2d(dim),
            nn.ReLU(inplace=True),
        )

        # Fusion heads (no trailing ReLU: it is applied after the
        # residual addition in forward).
        self.fusion_f3_f4 = nn.Sequential(
            nn.Conv2d(dim * 2, dim, kernel_size=3, padding=1),
            nn.BatchNorm2d(dim),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim, dim, kernel_size=1),
            nn.BatchNorm2d(dim),
        )
        self.fusion_final = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=1),
            nn.BatchNorm2d(256),
        )

        self.relu = nn.ReLU(inplace=True)

        # Light dropout on the decoder output to reduce overfitting.
        self.dropout = nn.Dropout2d(0.1)

    def forward(self, f2, f3, f4):
        # Per-level multi-scale extraction.
        f2_rfb = self.rfb_f2(f2)   # e.g. (B, 256, 32, 32)
        f3_rfb = self.rfb_f3(f3)   # e.g. (B, 256, 16, 16)
        f4_rfb = self.rfb_f4(f4)   # e.g. (B, 512,  8,  8)

        # Stage 1: lift f4 to f3's resolution and fuse with f3.
        f4_up = self.refine_f4(self.upconv_f4(f4_rfb))

        cat_34 = torch.cat([f3_rfb, f4_up], dim=1)
        gate_34 = self.attention_f3_f4(cat_34)
        f3_att = f3_rfb * gate_34 + f3_rfb          # attention + identity

        fused_34 = self.relu(self.fusion_f3_f4(cat_34) + f3_att)

        # Stage 2: lift the fused map to f2's resolution and fuse again.
        up_34 = self.refine_f3(self.upconv_f3(fused_34))

        cat_2 = torch.cat([f2_rfb, up_34], dim=1)
        gate_2 = self.attention_f2_f3f4(cat_2)
        f2_att = f2_rfb * gate_2 + f2_rfb           # attention + identity

        out = self.relu(self.fusion_final(cat_2) + f2_att)
        return self.dropout(out)

# ---------- 4. Network body (ResNet50 encoder) ---------- #
class AbgNet1(nn.Module):
    """Dual-branch (region + boundary) segmentation network.

    ResNet50 encoder -> PartialDecoder -> two cascaded MCRAM refinement
    branches. forward(x) with x of shape (B, 3, 256, 256) returns a dict:
      'final'    - region prediction upsampled to 256x256,
      'region'   - region side outputs (for deep supervision),
      'boundary' - boundary side outputs (for boundary supervision).
    """

    def __init__(self, num_classes=2):
        super().__init__()
        backbone = resnet50(weights='IMAGENET1K_V2')

        self.backbone = backbone
        # Stem: conv + BN + ReLU + maxpool (output stride 4).
        self.conv1 = nn.Sequential(
            self.backbone.conv1,
            self.backbone.bn1,
            self.backbone.relu,
            self.backbone.maxpool,
        )

        self.layer1 = self.backbone.layer1  # (B, 256,  64, 64)
        self.layer2 = self.backbone.layer2  # (B, 512,  32, 32)
        self.layer3 = self.backbone.layer3  # (B, 1024, 16, 16)
        self.layer4 = self.backbone.layer4  # (B, 2048,  8,  8)

        self.pd = PartialDecoder([512, 1024, 2048])

        # Coarse 8x8 score heads (identical architecture per branch).
        self.region_conv = self._make_head(num_classes)
        self.boundary_conv = self._make_head(num_classes)

        # Region branch refiners, deepest encoder level first.
        self.region_mcram1 = MCRAM(in_channels=2048)
        self.region_mcram2 = MCRAM(in_channels=1024)
        self.region_mcram3 = MCRAM(in_channels=512)

        # Boundary branch refiners, same cascade.
        self.boundary_mcram1 = MCRAM(in_channels=2048)
        self.boundary_mcram2 = MCRAM(in_channels=1024)
        self.boundary_mcram3 = MCRAM(in_channels=512)

        self.up2x = nn.Upsample(scale_factor=2, mode='bicubic', align_corners=True)

    @staticmethod
    def _make_head(num_classes):
        """32x32 decoder feature -> 8x8 score map via two pooling stages."""
        return nn.Sequential(
            nn.Conv2d(256, 64, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 16, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(16, num_classes, 1),
        )

    def forward(self, x):
        # ---- encoder ---- #
        stem = self.conv1(x)          # (B, 64, 64, 64)
        f1 = self.layer1(stem)        # (B, 256, 64, 64)
        f2 = self.layer2(f1)          # (B, 512, 32, 32)
        f3 = self.layer3(f2)          # (B, 1024, 16, 16)
        f4 = self.layer4(f3)          # (B, 2048, 8, 8)

        global_feat = self.pd(f2, f3, f4)   # (B, 256, 32, 32)

        # ---- region branch: coarse-to-fine MCRAM cascade ---- #
        region_side1 = self.region_conv(global_feat)                                   # 8x8
        region_side2 = self.up2x(self.region_mcram1(f4, region_side1) + region_side1)  # 16x16
        region_side3 = self.up2x(self.region_mcram2(f3, region_side2) + region_side2)  # 32x32
        region_output = self.region_mcram3(f2, region_side3) + region_side3            # 32x32
        final_out = self.up2x(region_output)                                           # 64x64

        # ---- boundary branch: same cascade, boundary targets ---- #
        boundary_side1 = self.boundary_conv(global_feat)                                       # 8x8
        boundary_side2 = self.up2x(self.boundary_mcram1(f4, boundary_side1) + boundary_side1)  # 16x16
        boundary_side3 = self.up2x(self.boundary_mcram2(f3, boundary_side2) + boundary_side2)  # 32x32
        boundary_output = self.up2x(self.boundary_mcram3(f2, boundary_side3) + boundary_side3) # 64x64

        return {
            'final': F.interpolate(final_out,(256,256),mode='bicubic'),
            'region': [region_side2, region_side3, final_out],
            'boundary': [boundary_side2, boundary_side3, boundary_output],
        }


class ContinuityLoss(nn.Module):
    """Penalize spatial discontinuity of a prediction map.

    Computes the mean absolute second-order finite difference along
    width and height and scales the sum by `weight`. A spatially
    constant or linear map yields zero loss.
    """

    def __init__(self, weight=0.1):
        super().__init__()
        self.weight = weight

    def forward(self, pred):
        # Second derivative approximations: p[i+1] - 2*p[i] + p[i-1].
        pred_xx = torch.abs(pred[:, :, :, 2:] - 2 * pred[:, :, :, 1:-1] + pred[:, :, :, :-2])
        pred_yy = torch.abs(pred[:, :, 2:, :] - 2 * pred[:, :, 1:-1, :] + pred[:, :, :-2, :])

        continuity_loss = torch.mean(pred_xx) + torch.mean(pred_yy)
        return self.weight * continuity_loss


class CombinedLoss(nn.Module):
    """Deep-supervision loss: alpha*BCE + (1 - alpha - 0.1)*dice on each
    region side output (against `masks`) and each boundary side output
    (against `contours`), plus a continuity regularizer on the region
    probabilities.

    Fix: `contours` has a default of None but was previously indexed
    unconditionally, crashing on the default call. The boundary term is
    now skipped when `contours` is None.
    """

    def __init__(self, alpha=0.2, continuity_weight=0.1):
        super().__init__()
        self.alpha = alpha  # BCE weight; dice weight is (1 - alpha - 0.1)
        self.continuity_loss = ContinuityLoss(weight=continuity_weight)

    def dice_loss(self, pred, target):
        """Soft dice loss over probabilities in [0, 1]."""
        smooth = 1e-5
        inter = (pred * target).sum()
        union = pred.sum() + target.sum()
        return 1 - (2. * inter + smooth) / (union + smooth)

    def _bce_dice(self, pred, target):
        """Weighted BCE + dice, summed over the two channels."""
        total = 0
        for c in [0, 1]:
            bce = F.binary_cross_entropy_with_logits(pred[:, c], target[:, c])
            dice = self.dice_loss(torch.sigmoid(pred[:, c]), target[:, c])
            total += self.alpha * bce + (1 - self.alpha - 0.1) * dice
        return total

    def forward(self, preds, masks, contours=None):
        total = 0

        # Region side outputs, supervised against the segmentation masks.
        for pred in preds['region']:
            pred = F.interpolate(pred, masks.shape[2:], mode='bicubic')
            total += self._bce_dice(pred, masks)

        # Boundary side outputs; skipped entirely when no contour
        # targets are supplied (previously this crashed on None).
        if contours is not None:
            for pred in preds['boundary']:
                pred = F.interpolate(pred, masks.shape[2:], mode='bicubic')
                total += self._bce_dice(pred, contours)

        # Continuity regularizer on the region probability maps.
        for pred in preds['region']:
            total += self.continuity_loss(torch.sigmoid(pred))

        return total

# ---------- 5. Self-check ---------- #
if __name__ == '__main__':
    # Smoke test: build the network, run one forward pass, report sizes.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = AbgNet1().to(device)
    sample = torch.randn(2, 3, 256, 256).to(device)
    with torch.no_grad():
        result = model(sample)['final']
        trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('AbgNet 可训练参数 : {:.2f} M'.format(trainable / 1e6))
    print('input :', sample.shape)
    print('output:', result.shape)