import torch
import torch.nn as nn

"""
FFCM model
文章名称：Efficient Frequency-Domain Image Deraining with Contrastive Regularization
github地址：https://github.com/deng-ai-lab/FADformer/blob/main/models/FADformer.py
摘要：

"""


class EnhancedFourierUnit(nn.Module):
    """Fourier-domain feature mixing with learned low/high-frequency gating.

    The input is moved to the frequency domain with a 2-D real FFT, the
    concatenated (real, imag) spectrum is processed by two grouped 1x1
    convolutions (a "low" and a "high" branch), the branches are blended with
    two softmax attention weights derived from the spatial input, and the
    result is transformed back with the inverse FFT and added to a residual.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        groups: unused; kept only for interface compatibility.
    """

    def __init__(self, in_channels, out_channels, groups=1):
        super().__init__()
        # Grouped 1x1 convs over the concatenated (real, imag) spectrum.
        self.conv_low = nn.Conv2d(in_channels * 2, in_channels * 2, 1, groups=in_channels)
        self.conv_high = nn.Conv2d(in_channels * 2, in_channels * 2, 1, groups=in_channels)

        # Two softmax-normalized scalars per sample that weight the low- and
        # high-frequency branches. max(..., 1) keeps the bottleneck valid for
        # in_channels < 8 (the original `in_channels // 8` could be 0).
        self.freq_att = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, max(in_channels // 8, 1), 1),
            nn.ReLU(),
            nn.Conv2d(max(in_channels // 8, 1), 2, 1),
            nn.Softmax(dim=1)
        )

        # Project BOTH the residual and the spectral path when the channel
        # counts differ. The original projected only the residual, so any
        # in_channels != out_channels instantiation crashed at `x + residual`.
        # Both are Identity when the counts match, preserving old behavior.
        self.res_conv = nn.Conv2d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity()
        self.out_proj = nn.Conv2d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity()

    def forward(self, x):
        residual = self.res_conv(x)

        # To frequency domain: rfft2 yields [B, C, H, W//2 + 1] complex values;
        # stacking real and imaginary parts gives [B, 2C, H, W//2 + 1].
        fft = torch.fft.rfft2(x, norm='ortho')
        fft = torch.cat([fft.real, fft.imag], dim=1)

        # Frequency attention: per-sample branch weights of shape [B, 2, 1, 1].
        att = self.freq_att(x)
        low = self.conv_low(fft) * att[:, 0].unsqueeze(1)
        high = self.conv_high(fft) * att[:, 1].unsqueeze(1)

        # Back to the spatial domain; `s` pins the output spatial size, which
        # irfft2 cannot otherwise recover unambiguously for odd widths.
        fft_real, fft_imag = torch.chunk(low + high, 2, dim=1)
        fft = torch.complex(fft_real, fft_imag)
        out = torch.fft.irfft2(fft, s=x.shape[-2:], norm='ortho')

        return self.out_proj(out) + residual


class SFCAttention(nn.Module):
    """Combined spectral-channel and spatial attention gate.

    A frequency branch turns the globally pooled input into per-channel
    weights (passing through a Fourier unit), while a spatial branch produces
    a single-channel per-pixel weight map. The input is scaled by the sum of
    the two gates.
    """

    def __init__(self, dim):
        super().__init__()
        reduced = dim // 8

        # Channel gate computed from global context in the Fourier domain.
        self.freq_att = nn.Sequential(
            nn.Conv2d(dim, reduced, 1),
            nn.GELU(),
            EnhancedFourierUnit(reduced, reduced),
            nn.Conv2d(reduced, dim, 1),
            nn.Sigmoid()
        )

        # Per-pixel gate squeezed down to one channel.
        self.spatial_att = nn.Sequential(
            nn.Conv2d(dim, reduced, 1),
            nn.GELU(),
            nn.Conv2d(reduced, 1, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        # Global average over the spatial axes -> [B, C, 1, 1] context vector.
        pooled = x.mean(dim=(2, 3), keepdim=True)
        channel_gate = self.freq_att(pooled)
        pixel_gate = self.spatial_att(x)
        return x * (channel_gate + pixel_gate)


class DynamicFusion(nn.Module):
    """Dynamically weighted fusion of 3x3, 5x5 and Fourier-domain branches."""

    def __init__(self, dim):
        super().__init__()
        # Predicts three softmax fusion weights per sample from the pooled
        # concatenation of the two spatial branches.
        self.weights = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim * 2, dim // 2, 1),
            nn.ReLU(),
            nn.Conv2d(dim // 2, 3, 1),
            nn.Softmax(dim=1)
        )

        # Branches: two depthwise spatial convolutions plus a spectral unit.
        self.conv3x3 = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
        self.conv5x5 = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
        self.fourier = EnhancedFourierUnit(dim, dim)

    def forward(self, x):
        local_fine = self.conv3x3(x)
        local_coarse = self.conv5x5(x)
        spectral = self.fourier(x)

        # NOTE(review): the weight predictor sees only the two spatial
        # branches, not the spectral one — presumably intentional; confirm
        # against the reference implementation.
        w = self.weights(torch.cat([local_fine, local_coarse], dim=1))

        fused = 0
        for i, branch in enumerate((local_fine, local_coarse, spectral)):
            fused = fused + w[:, i].unsqueeze(1) * branch
        return fused


class EnhancedFFCM(nn.Module):
    """Full FFCM block: dynamic fusion -> spectral/spatial attention -> refine.

    The refinement stage is a pointwise expand-GELU-contract MLP applied as a
    residual on top of the attended features.
    """

    def __init__(self, dim):
        super().__init__()
        # Multi-branch feature enhancement.
        self.dynamic_fusion = DynamicFusion(dim)

        # Spectral-channel + spatial attention.
        self.sfc_att = SFCAttention(dim)

        # Pointwise expand/contract refinement.
        self.refine = nn.Sequential(
            nn.Conv2d(dim, dim * 2, 1),
            nn.GELU(),
            nn.Conv2d(dim * 2, dim, 1)
        )

    def forward(self, x):
        fused = self.dynamic_fusion(x)
        attended = self.sfc_att(fused)
        # Residual refinement around the attended features.
        return attended + self.refine(attended)

if __name__ == '__main__':
    # Smoke test: one forward pass on a random batch, reporting output shape.
    dummy_input = torch.randn(4, 192, 120, 160)
    ffcm_block = EnhancedFFCM(dim=192)
    result = ffcm_block(dummy_input)
    print("shape:", result.shape)