import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models


# ------------- 可变形交互式注意力模块-------------
# ------------- Deformable interactive attention module -------------
class DeformableInteractiveAttention(nn.Module):
    """Spatial attention computed on downsampled channel-pooled maps.

    Channel-wise mean and max maps are downsampled (stride-2 convs),
    optionally cross-gated ("distortion" mode), fused by a 3x3 conv,
    upsampled back to the input resolution and turned into a sigmoid
    mask that rescales the input.

    Args:
        stride: stride of the distortion gate convs (default 1).
        distortionmode: enable the cross-gating branch.
    """

    def __init__(self, stride=1, distortionmode=True):
        super().__init__()
        self.distortionmode = distortionmode
        self.conv = nn.Conv2d(2, 1, 3, 1, 1)
        self.sigmoid = nn.Sigmoid()
        # Kept for backward compatibility (parameter-free); forward() now
        # interpolates to the exact input size instead of a fixed factor.
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.downavg = nn.Conv2d(1, 1, 3, 2, 1)
        self.downmax = nn.Conv2d(1, 1, 3, 2, 1)

        if distortionmode:
            self.d_conv = nn.Conv2d(1, 1, 3, stride, 1)
            self.d_conv1 = nn.Conv2d(1, 1, 3, stride, 1)
            # Zero-init weights so the gates start near sigmoid(bias) ≈ 0.5
            # and learn a deviation from a neutral gating.
            nn.init.constant_(self.d_conv.weight, 0)
            nn.init.constant_(self.d_conv1.weight, 0)

    def forward(self, x):
        """x: [B, C, H, W] -> attention-reweighted tensor of the same shape."""
        avg = torch.mean(x, dim=1, keepdim=True)        # [B,1,H,W]
        max_ = torch.max(x, dim=1, keepdim=True)[0]     # [B,1,H,W]
        avg = self.downavg(avg)                         # [B,1,ceil(H/2),ceil(W/2)]
        max_ = self.downmax(max_)
        out = torch.cat([max_, avg], dim=1)
        if self.distortionmode:
            # Cross-gating: the avg-derived gate modulates the max map and
            # vice versa before fusion.
            d_avg = torch.sigmoid(self.d_conv(avg))
            d_max = torch.sigmoid(self.d_conv1(max_))
            out = torch.cat([d_avg * max_, d_max * avg], dim=1)
        out = self.conv(out)
        # BUG FIX: a fixed scale_factor=2 upsample yields 2*ceil(H/2), which
        # mismatches odd-sized inputs and breaks `x * mask`. Interpolate to
        # the exact input resolution instead.
        out = F.interpolate(out, size=x.shape[-2:], mode='bilinear', align_corners=False)
        mask = self.sigmoid(out)
        return F.relu(x * mask, inplace=True)

# ---------------- decoder single upsampling block ----------------
# inputs: x [in_c, h, w] and skip [skip_c, h*2, w*2]
# output: [out_c, h*2, w*2]
class UpBlock(nn.Module):
    """Single decoder stage: 2x transposed-conv upsample, skip fusion,
    3x3 conv + BN + ReLU, then deformable interactive attention.

    Args:
        in_c:   channels of the incoming decoder feature.
        skip_c: channels of the encoder skip feature.
        out_c:  channels of this stage's output.
        distortion: forwarded to DeformableInteractiveAttention.
    """

    def __init__(self, in_c, skip_c, out_c, distortion=True):
        super().__init__()
        self.up = nn.ConvTranspose2d(in_c, in_c // 2, 2, stride=2)
        # 1x1 conv so any skip channel count can be matched to in_c // 2
        self.skip_conv = nn.Conv2d(skip_c, in_c // 2, 1)
        self.conv = nn.Sequential(
            # input channels = in_c//2 (upsampled) + in_c//2 (adjusted skip)
            nn.Conv2d(in_c, out_c, 3, 1, 1),
            nn.BatchNorm2d(out_c),
            nn.ReLU(inplace=True)
        )
        self.att = DeformableInteractiveAttention(distortionmode=distortion)

    def forward(self, x, skip):
        x = self.up(x)                 # 2x upsample
        # BUG FIX: when the encoder halved an odd spatial size, the transposed
        # conv output is 1 px smaller than the skip map; align before concat.
        if x.shape[-2:] != skip.shape[-2:]:
            x = F.interpolate(x, size=skip.shape[-2:], mode='bilinear', align_corners=False)
        skip = self.skip_conv(skip)    # match skip channels to in_c // 2
        x = torch.cat([x, skip], dim=1)  # skip connection
        x = self.conv(x)
        x = self.att(x)                # attention refinement
        return x


# ---------------- 完整 U-Net ----------------
# ---------------- full U-Net ----------------
class DIANet(nn.Module):
    """U-Net-style segmentation network: ResNet-50 encoder, attention-gated
    decoder (UpBlock), 1x1 prediction head.

    Args:
        distortion:  enable the distortion branch of the attention modules.
        num_classes: output channels of the prediction head.
        weights:     torchvision weight spec for the ResNet-50 backbone.
                     Default keeps the original 'IMAGENET1K_V2' behaviour;
                     pass None to build the model without downloading
                     pretrained weights (e.g. offline smoke tests).
    """

    def __init__(self, distortion=True, num_classes=2, weights='IMAGENET1K_V2'):
        super().__init__()
        # Encoder: first 4 stages of ResNet-50
        backbone = models.resnet50(weights=weights)

        self.enc0 = nn.Sequential(backbone.conv1, backbone.bn1, backbone.relu)
        self.enc1 = nn.Sequential(backbone.maxpool, backbone.layer1)  # 256 channels
        self.enc2 = backbone.layer2  # 512 channels
        self.enc3 = backbone.layer3  # 1024 channels
        self.enc4 = backbone.layer4  # 2048 channels

        # Decoder — channel counts mirror the ResNet-50 stage outputs
        self.up4 = UpBlock(2048, 1024, 1024, distortion)
        self.up3 = UpBlock(1024, 512, 512, distortion)
        self.up2 = UpBlock(512, 256, 256, distortion)
        self.up1 = UpBlock(256, 64, 64, distortion)
        self.up0 = UpBlock(64, 32, 32, distortion)

        # Full-resolution stem used as the skip input of the last decoder stage
        self.conv = nn.Conv2d(3, 32, 3, padding=1)
        # Prediction head
        self.head = nn.Conv2d(32, num_classes, 1)

    def forward(self, x):
        e0 = self.enc0(x)   # 64, H/2, W/2
        e1 = self.enc1(e0)  # 256, H/4, W/4
        e2 = self.enc2(e1)  # 512, H/8, W/8
        e3 = self.enc3(e2)  # 1024, H/16, W/16
        e4 = self.enc4(e3)  # 2048, H/32, W/32

        d4 = self.up4(e4, e3) # 1024, H/16
        d3 = self.up3(d4, e2) # 512, H/8
        d2 = self.up2(d3, e1) # 256, H/4
        d1 = self.up1(d2, e0) # 64, H/2
        d0 = self.up0(d1, self.conv(x))  # 32, H
        final_out = self.head(d0)

        return {'final': final_out}   # num_classes × H × W logits


class CombinedLoss(nn.Module):
    """BCE + Dice segmentation loss with an extra contour-weighted BCE term.

    Total = (alpha + beta) * (BCE + Dice) + gamma * contour-BCE.
    NOTE(review): beta was meant for a separate region-constraint term that
    is not implemented here, so it is folded into the base-term weight.
    """

    def __init__(self, alpha=0.4, beta=0.4, gamma=0.2, smooth=1e-5):
        super().__init__()
        self.alpha = alpha      # BCE-Dice weight
        self.beta  = beta       # region-constraint weight (folded into base term)
        self.gamma = gamma      # contour weight
        self.smooth = smooth    # Dice smoothing to avoid division by zero
        self.bce = nn.BCEWithLogitsLoss()

    def forward(self, logits, mask, contour):
        """
        logits: dict with key 'final' -> [B,1,H,W] raw scores.
        mask:   [B,1,H,W], values in {0,1}: 0 = nasopharynx, 1 = adenoid.
        contour: [B,1,H,W] contour-pixel map used as a BCE weight.
        """
        scores = logits['final']
        target = mask.float()

        # Base term: pixel-wise BCE plus soft Dice over spatial dims.
        bce_term = self.bce(scores, target)
        probs = torch.sigmoid(scores)
        intersection = (probs * mask).sum((2, 3))
        denom = probs.sum((2, 3)) + mask.sum((2, 3))
        dice_term = 1 - (2 * intersection + self.smooth) / (denom + self.smooth)
        base = bce_term + dice_term.mean()

        # Contour term: BCE weighted by (and restricted to) contour pixels;
        # skipped entirely when the contour map is empty.
        contour_term = 0.0
        if contour.sum() > 0:
            contour_term = F.binary_cross_entropy_with_logits(
                scores, target, weight=contour * 7.0)

        return (self.alpha + self.beta) * base + self.gamma * contour_term

# ---------------- 简单测试 ----------------
# ---------------- quick smoke test ----------------
if __name__ == '__main__':
    # Build the network on GPU when available, otherwise CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = DIANet().to(device)
    dummy = torch.randn(2, 3, 256, 256).to(device)
    with torch.no_grad():
        result = model(dummy)['final']
        n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('可训练参数 : {:.2f} M'.format(n_params / 1e6))
    print('input :', dummy.shape)
    print('output:', result.shape)