import torch
import torch.nn as nn
from torch.nn import functional as F
# NOTE(review): anomaly detection is a debugging aid and adds a large runtime
# overhead to every backward pass; it should be disabled (or made
# configurable) before real training runs.
torch.autograd.set_detect_anomaly(True)
# 基本卷积块
class Conv(nn.Module):
    def __init__(self, C_in, C_out):
        super(Conv, self).__init__()
        self.layer = nn.Sequential(
            nn.Conv2d(C_in, C_out, 3, 1, 1),
            nn.BatchNorm2d(C_out),
            nn.Dropout(0.3),
            nn.LeakyReLU(),
            nn.Conv2d(C_out, C_out, 3, 1, 1),
            nn.BatchNorm2d(C_out),
            nn.Dropout(0.4),
            nn.LeakyReLU(),
        )

    def forward(self, x):
        return self.layer(x)

# 下采样模块
class DownSampling(nn.Module):
    """Halve the spatial resolution with a stride-2 3x3 convolution.

    Channel count is unchanged; a LeakyReLU follows the convolution.
    """

    def __init__(self, C):
        super().__init__()
        self.Down = nn.Sequential(
            nn.Conv2d(C, C, 3, 2, 1),  # stride 2 -> H and W are halved
            nn.LeakyReLU(),
        )

    def forward(self, x):
        reduced = self.Down(x)
        return reduced

# 上采样模块
class UpSampling(nn.Module):
    """Upsample by 2x, halve channels, and concatenate a skip connection.

    ``x`` is nearest-neighbour upsampled, its channels are reduced from
    ``C`` to ``C // 2`` by a 1x1 convolution, and the result is concatenated
    with the encoder feature map ``r`` along the channel axis.
    """

    def __init__(self, C):
        super().__init__()
        # 1x1 conv only remaps channels: C -> C // 2.
        self.Up = nn.Conv2d(C, C // 2, 1, 1)

    def forward(self, x, r):
        grown = F.interpolate(x, scale_factor=2, mode="nearest")
        halved = self.Up(grown)
        return torch.cat((halved, r), dim=1)

# 分支1：Noise MLP（逐像素随机映射）
class NoiseMLP(nn.Module):
    def __init__(self, in_channels=3, hidden=64):
        super(NoiseMLP, self).__init__()
        self.layers = nn.Sequential(
            nn.Conv2d(in_channels, hidden, 1),  # 逐像素映射
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, hidden, 1),
            nn.ReLU(inplace=True)
        )
    def forward(self, x):
        return self.layers(x)

# 分支2：轻量级图像编码器
class SmallEncoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=64):
        super(SmallEncoder, self).__init__()
        self.layers = nn.Sequential(
            nn.Conv2d(in_channels, 32, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, out_channels, 3, 1, 1),
            nn.ReLU(inplace=True)
        )
    def forward(self, x):
        return self.layers(x)

# 主干：你之前写的 U-Net（稍改动输入通道）
class UNet(nn.Module):
    def __init__(self, in_channels=128):  # concat 之后 = 64+64
        super(UNet, self).__init__()

        self.C1 = Conv(in_channels, 64)
        self.D1 = DownSampling(64)
        self.C2 = Conv(64, 128)
        self.D2 = DownSampling(128)
        self.C3 = Conv(128, 256)
        self.D3 = DownSampling(256)
        self.C4 = Conv(256, 512)
        self.D4 = DownSampling(512)
        self.C5 = Conv(512, 1024)

        self.U1 = UpSampling(1024)
        self.C6 = Conv(1024, 512)
        self.U2 = UpSampling(512)
        self.C7 = Conv(512, 256)
        self.U3 = UpSampling(256)
        self.C8 = Conv(256, 128)
        self.U4 = UpSampling(128)
        self.C9 = Conv(128, 64)

        self.Th = nn.Tanh()
        self.pred = nn.Conv2d(64, 3, 3, 1, 1)

    def forward(self, x):
        R1 = self.C1(x)
        R2 = self.C2(self.D1(R1)) 
        R3 = self.C3(self.D2(R2))
        R4 = self.C4(self.D3(R3))
        Y1 = self.C5(self.D4(R4))

        O1 = self.C6(self.U1(Y1, R4))
        O2 = self.C7(self.U2(O1, R3))
        O3 = self.C8(self.U3(O2, R2))
        O4 = self.C9(self.U4(O3, R1))

        return self.Th(self.pred(O4))

# 总体网络
class DistortionNet(nn.Module):
    def __init__(self):
        super(DistortionNet, self).__init__()
        self.noise_branch = NoiseMLP(in_channels=3, hidden=64)
        self.image_branch = SmallEncoder(in_channels=3, out_channels=64)
        self.unet = UNet(in_channels=128)  # 融合输入

    def forward(self, Iori):
        B, C, H, W = Iori.shape
        noise = torch.rand(B, 3, H, W)  # 均匀噪声
        noise = noise.cuda()
        Iori = Iori.cuda()
        feat_noise = self.noise_branch(noise)
        feat_image = self.image_branch(Iori)

        fused = torch.cat([feat_noise, feat_image], dim=1)
        D = self.unet(fused)

        return Iori + D  # 返回失真图像和噪声
