# FFT and CBAM modules
import torch
import torch.nn as nn
import math
from einops import rearrange
from timm.models.layers import DropPath
import numbers

class SpectralGatingNetwork(nn.Module):
    """Frequency-domain gating module.

    Transforms a token sequence to the frequency domain with a real 2-D FFT,
    applies a 1x1 convolution plus SE-style channel attention independently to
    the real and imaginary spectra, then transforms back with the inverse FFT.

    Args:
        dim: number of channels C of the input tokens (must be > 0).
    """

    def __init__(self, dim):
        super().__init__()
        assert dim > 0, "dim must be greater than 0"
        # 1x1 convs applied independently to the real and imaginary spectra.
        self.conv_real = nn.Conv2d(dim, dim, kernel_size=1)
        self.conv_imag = nn.Conv2d(dim, dim, kernel_size=1)

        # One squeeze-and-excitation style attention head per spectrum part.
        # (Attribute names kept identical to preserve state_dict compatibility.)
        self.channel_attention_real = self._build_channel_attention(dim)
        self.channel_attention_imag = self._build_channel_attention(dim)

    @staticmethod
    def _build_channel_attention(dim):
        """Return an SE-style channel-attention head with Kaiming-initialized convs."""
        reduction_ratio = max(1, dim // 16)  # keep the bottleneck at least 1 channel wide
        block = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim, reduction_ratio, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduction_ratio, dim, kernel_size=1),
            nn.Sigmoid(),
        )
        for layer in block:
            if isinstance(layer, nn.Conv2d):
                nn.init.kaiming_normal_(layer.weight)
        return block

    def forward(self, x, spatial_size=None):
        """Apply spectral gating.

        Args:
            x: tokens of shape (B, N, C).
            spatial_size: optional (a, b) with a * b == N; if None, N is
                assumed to be a perfect square and a square grid is used.

        Returns:
            Tensor of shape (B, N, C).
        """
        B, N, C = x.shape
        assert C > 0, "Channel dimension (C) must be greater than 0"
        if spatial_size is None:
            a = b = int(math.sqrt(N))
        else:
            a, b = spatial_size
        # Fail early with a clear message instead of a cryptic reshape error.
        assert a * b == N, f"spatial size {(a, b)} incompatible with sequence length {N}"

        # FFT ops are fragile in reduced precision; keep this path in fp32
        # even when the caller runs under autocast.
        with torch.cuda.amp.autocast(enabled=False):
            # reshape (not view) tolerates non-contiguous inputs.
            x = x.reshape(B, a, b, C)
            x = torch.fft.rfft2(x, dim=(1, 2), norm='ortho')
            real_part = x.real
            imag_part = x.imag

            # Move channels first for the conv layers.
            real_part = real_part.permute(0, 3, 1, 2)  # Shape: (B, C, a, b//2+1)
            imag_part = imag_part.permute(0, 3, 1, 2)  # Shape: (B, C, a, b//2+1)

            # Apply convolution on real and imaginary parts.
            real_conv = self.conv_real(real_part)
            imag_conv = self.conv_imag(imag_part)

            # Per-channel attention weights in (0, 1).
            real_attention = self.channel_attention_real(real_conv)  # Shape: (B, C, 1, 1)
            imag_attention = self.channel_attention_imag(imag_conv)  # Shape: (B, C, 1, 1)

            # Residual-style rescaling: amplify attended channels, never zero them.
            real_conv = real_conv * (1 + real_attention)
            imag_conv = imag_conv * (1 + imag_attention)

            real_conv = real_conv.permute(0, 2, 3, 1)  # Shape: (B, a, b//2+1, C)
            imag_conv = imag_conv.permute(0, 2, 3, 1)  # Shape: (B, a, b//2+1, C)

            x_fft_enhanced = torch.complex(real_conv, imag_conv)

            # s=(a, b) guarantees the inverse transform restores the grid size.
            x = torch.fft.irfft2(x_fft_enhanced, s=(a, b), dim=(1, 2), norm='ortho')  # (B, a, b, C)

            return x.reshape(B, N, C)
class Mlp(nn.Module):
    """Two-layer feed-forward network: Linear -> act -> drop -> Linear -> drop.

    Hidden and output widths default to the input width when not given.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # Expand, activate and regularize, then project back.
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))

class BiasFree_LayerNorm(nn.Module):
    """LayerNorm variant with no mean subtraction and no bias term.

    Computes x / sqrt(var(x) + eps) * weight over the last dimension,
    where var is the biased (population) variance.
    """

    def __init__(self, normalized_shape):
        super(BiasFree_LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        shape = torch.Size(normalized_shape)

        assert len(shape) == 1

        self.weight = nn.Parameter(torch.ones(shape))
        self.normalized_shape = shape

    def forward(self, x):
        # Biased variance over the last dim; the mean is deliberately not removed.
        var = x.var(dim=-1, keepdim=True, unbiased=False)
        return self.weight * (x * torch.rsqrt(var + 1e-5))
class WithBias_LayerNorm(nn.Module):
    """Standard LayerNorm over the last dimension with learnable scale and bias.

    Computes (x - mean) / sqrt(var + eps) * weight + bias, using the
    biased (population) variance.
    """

    def __init__(self, normalized_shape):
        super(WithBias_LayerNorm, self).__init__()
        if isinstance(normalized_shape, numbers.Integral):
            normalized_shape = (normalized_shape,)
        shape = torch.Size(normalized_shape)

        assert len(shape) == 1

        self.weight = nn.Parameter(torch.ones(shape))
        self.bias = nn.Parameter(torch.zeros(shape))
        self.normalized_shape = shape

    def forward(self, x):
        mu = x.mean(dim=-1, keepdim=True)
        var = x.var(dim=-1, keepdim=True, unbiased=False)
        normalized = (x - mu) * torch.rsqrt(var + 1e-5)
        return normalized * self.weight + self.bias

class LayerNorm(nn.Module):
    """Channel LayerNorm for (B, C, H, W) tensors via a 3-D round trip.

    Flattens spatial positions into tokens, normalizes over channels with a
    bias-free or with-bias 1-D LayerNorm, then restores the spatial layout.
    """

    def __init__(self, dim, LayerNorm_type):
        super(LayerNorm, self).__init__()
        norm_cls = BiasFree_LayerNorm if LayerNorm_type == 'BiasFree' else WithBias_LayerNorm
        self.body = norm_cls(dim)

    def forward(self, x):
        height, width = x.shape[-2:]
        tokens = self.body(to_3d(x))
        return to_4d(tokens, height, width)

class FFTBlock(nn.Module):
    """Residual block combining spectral gating and an MLP.

    Pipeline (no inner residual between the two stages, matching the original
    design): x + DropPath(MLP(Norm2(SpectralGating(Norm1(x))))).

    Args:
        dim: channel count of the (B, C, H, W) input.
        mlp_ratio: hidden width of the MLP relative to dim.
        drop: dropout rate inside the MLP.
        drop_path: stochastic-depth rate on the residual branch.
        act_layer: activation class for the MLP.
        norm_layer: normalization class (must accept dim and LayerNorm_type).
    """

    def __init__(self, dim, mlp_ratio=2., drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm):
        super(FFTBlock, self).__init__()
        assert dim > 0, "dim must be greater than 0"
        self.norm1 = norm_layer(dim, LayerNorm_type='WithBias')
        self.filter1 = SpectralGatingNetwork(dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim, LayerNorm_type='WithBias')
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x, spatial_size=None):
        """Run the block on x of shape (B, C, H, W).

        Args:
            x: input feature map.
            spatial_size: (H, W) grid for the spectral filter. BUGFIX: the
                previous hard-coded default (64, 80) broke every other input
                resolution; when None we now derive it from the input itself.
        """
        h, w = x.shape[-2:]
        if spatial_size is None:
            spatial_size = (h, w)
        y = self.norm1(x)
        y = self.filter1(to_3d(y), spatial_size=spatial_size)
        y = to_4d(y, h, w)
        y = self.norm2(y)
        y = self.mlp(to_3d(y))
        y = to_4d(y, h, w)
        return x + self.drop_path(y)




def to_3d(x):
    """Flatten (B, C, H, W) into a token sequence (B, H*W, C)."""
    # Equivalent to rearrange(x, 'b c h w -> b (h w) c') in plain torch.
    return x.flatten(2).transpose(1, 2)


def to_4d(x, h, w):
    """Restore a token sequence (B, H*W, C) to a feature map (B, C, h, w)."""
    # Equivalent to rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) in plain torch.
    batch, _, channels = x.shape
    return x.transpose(1, 2).reshape(batch, channels, h, w)


if __name__ == '__main__':
    # Smoke test: run the FFT gating block on a random feature map and
    # confirm the output keeps the input shape.
    sample = torch.randn(2, 3, 512, 640)
    block = FFTBlock(dim=3, mlp_ratio=2.)
    result = block(sample, spatial_size=(512, 640))
    print(result.shape)
