import torch
from torch import nn
import torch.nn.functional as F
from pytorch_wavelets import DWTForward, DWTInverse
from timm.models.layers import DropPath
from timm.models.layers import trunc_normal_

"""
自适应特征增强模块AFE：可应用于语义分割任务的特征增强模块
论文地址：https://arxiv.org/pdf/2407.09379

模块原理：
1. 输入特征：接受经过归一化和卷积嵌入处理后的特征，该特征在一定程度上学习了泛化和判别能力，且通道已被压缩一半以减少计算量并促进特征混合
2. 模块组成与处理
A 空间上下文模块（SCM）：采用较大核（7*7）的分组卷积，旨在增加感受野，从而能够在更大范围内捕捉空间上下文信息，以应对场景中的尺度变化
B 特征细化模块（FRM）：受图像锐化和对比度增强概念启发，负责生成语义线索，通过下采样，上采样，特征差异计算和元素级乘法等操作，捕获低频和高频区域特征。
3. 输出融合与增强：SCM和FRM的输出先通过1*1卷积层进行融合，然后再经过卷积多层感知器进一步增强特征表示，使模型能够更好地处理杂乱背景下的语义分割任务。
"""


class LayerNorm(nn.Module):
    """Layer normalization supporting both channel layouts.

    ``channels_last`` expects (B, ..., C) and delegates to ``F.layer_norm``;
    ``channels_first`` expects (B, C, H, W) and normalizes over dim 1 by hand.
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        if self.data_format not in ["channels_last", "channels_first"]:
            raise NotImplementedError
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        if self.data_format == "channels_last":
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        # channels_first: manual normalization over the channel axis.
        mean = x.mean(dim=1, keepdim=True)
        var = (x - mean).pow(2).mean(dim=1, keepdim=True)
        x_hat = (x - mean) / torch.sqrt(var + self.eps)
        # Broadcast the per-channel affine parameters over H and W.
        return self.weight[:, None, None] * x_hat + self.bias[:, None, None]


class FeatureRefinementModule(nn.Module):
    """Split features into low- and high-frequency parts and refine each.

    A strided depthwise conv plus bilinear upsampling produces a smoothed
    copy of the input; ``x * up`` keeps low-frequency content while
    ``x - up`` isolates high-frequency detail. Both branches pass through
    depthwise 3x3 convs and are fused by a 1x1 projection.
    """

    def __init__(self, in_dim=16, out_dim=16, down_kernel=5, down_stride=4):
        super().__init__()

        # Depthwise 3x3 convs refining the low/high-frequency branches.
        self.lconv = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1, groups=in_dim)
        self.hconv = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1, groups=in_dim)
        self.norm1 = LayerNorm(in_dim, eps=1e-6, data_format="channels_first")
        self.norm2 = LayerNorm(in_dim, eps=1e-6, data_format="channels_first")
        self.act = nn.GELU()
        # Strided depthwise conv used as a learnable downsampler.
        self.down = nn.Conv2d(in_dim, in_dim, kernel_size=down_kernel, stride=down_stride, padding=down_kernel // 2,
                              groups=in_dim)
        # Fuses the two concatenated branches back to out_dim channels.
        self.proj = nn.Conv2d(in_dim * 2, out_dim, kernel_size=1, stride=1, padding=0)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal init for conv/linear weights; standard LN init.
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

        elif isinstance(m, (LayerNorm, nn.LayerNorm)):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x):
        """x: [B, C, H, W] -> [B, out_dim, H, W]."""
        B, C, H, W = x.shape

        dx = self.down(x)  # [B, C, H/stride, W/stride]
        udx = F.interpolate(dx, size=(H, W), mode='bilinear', align_corners=False)  # [B, C, H, W]

        # Low-frequency branch: gate the input by its smoothed copy.
        lx = self.norm1(self.lconv(self.act(x * udx)))  # [B, C, H, W]
        # High-frequency branch: residual against the smoothed copy.
        hx = self.norm2(self.hconv(self.act(x - udx)))  # [B, C, H, W]

        # Removed debug print() calls that executed on every forward pass.
        return self.act(self.proj(torch.cat([lx, hx], dim=1)))  # [B, out_dim, H, W]


class FrequencyEnhancementModule(nn.Module):
    """Wavelet-domain feature enhancement.

    Decomposes the input with a single-level DWT, refines the low- and
    high-frequency coefficients with separate conv branches, and rebuilds
    the spatial feature map with the inverse DWT.
    """

    def __init__(self, dim, wave='haar', mode='zero'):
        super(FrequencyEnhancementModule, self).__init__()
        self.dwt = DWTForward(J=1, mode=mode, wave=wave)
        self.idwt = DWTInverse(mode=mode, wave=wave)

        # Depthwise refinement of the 3 high-frequency sub-bands (LH/HL/HH),
        # flattened into 3*dim channels.
        self.high_freq_conv = nn.Sequential(
            nn.Conv2d(3 * dim, 3 * dim, kernel_size=3, padding=1, groups=3 * dim),
            nn.BatchNorm2d(3 * dim),
            nn.ReLU(inplace=True)
        )

        # Refinement of the low-frequency (approximation) band.
        self.low_freq_conv = nn.Sequential(
            nn.Conv2d(dim, dim, kernel_size=3, padding=1),
            nn.BatchNorm2d(dim),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """x: [B, dim, H, W] -> [B, dim, H, W]. H and W should be even."""
        # DWTForward(J=1) returns (yl, yh): yl is [B, C, H/2, W/2] and yh is
        # a one-element list holding a [B, C, 3, H/2, W/2] sub-band tensor
        # (the original code expected a list of 3 tensors, a format the
        # library never produces; that dead branch is removed).
        x_low, x_high = self.dwt(x)
        yh = x_high[0]  # [B, C, 3, H/2, W/2]
        B, C, S, Hh, Wh = yh.shape  # S == 3 sub-bands per channel

        # Flatten sub-bands into the channel axis for the depthwise conv.
        high_flat = yh.reshape(B, C * S, Hh, Wh)  # [B, 3*C, H/2, W/2]

        x_low = self.low_freq_conv(x_low)         # [B, C, H/2, W/2]
        high_flat = self.high_freq_conv(high_flat)  # [B, 3*C, H/2, W/2]

        # Restore the [B, C, 3, H/2, W/2] layout expected by the inverse DWT.
        yh_refined = high_flat.reshape(B, C, S, Hh, Wh)

        # BUGFIX: DWTInverse.forward takes a single (yl, yh) tuple where yh is
        # a list of [B, C, 3, H', W'] tensors. The original call
        # `self.idwt([x_low], [x_high_list])` passed two positional arguments
        # (and per-sub-band tensors), which raises at runtime.
        return self.idwt((x_low, [yh_refined]))  # [B, dim, H, W]


class AFE(nn.Module):
    """Adaptive Feature Enhancement block.

    Pipeline: depthwise spatial mixing (residual) -> channel halving ->
    a large-kernel spatial-context branch in parallel with a
    FeatureRefinementModule -> 1x1 fusion back to ``dim`` channels ->
    wavelet-based frequency enhancement fused through a final 1x1 conv.

    NOTE(review): ``dim`` must be a multiple of 8 (``ctx_conv`` uses
    groups=4 on ``dim // 2`` channels), and H/W should be even for the
    DWT inside the frequency branch.
    """

    def __init__(self, dim, kernel_size=3):
        super().__init__()

        self.dwconv = nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim)
        self.proj1 = nn.Conv2d(dim, dim // 2, 1, padding=0)  # halve channels
        self.proj2 = nn.Conv2d(dim, dim, 1, padding=0)       # fuse ctx + refined

        # Large-kernel (7x7) grouped conv: wide receptive field for context.
        self.ctx_conv = nn.Conv2d(dim // 2, dim // 2, kernel_size=7, padding=3, groups=4)

        self.norm1 = LayerNorm(dim, eps=1e-6, data_format="channels_first")
        self.norm2 = LayerNorm(dim // 2, eps=1e-6, data_format="channels_first")
        self.norm3 = LayerNorm(dim // 2, eps=1e-6, data_format="channels_first")

        self.enhance = FeatureRefinementModule(in_dim=dim // 2, out_dim=dim // 2, down_kernel=3, down_stride=2)

        self.freq_enhance = FrequencyEnhancementModule(dim=dim)  # wavelet-domain enhancement

        self.act = nn.GELU()

        self.proj_fusion = nn.Conv2d(dim, dim, kernel_size=1, padding=0)  # fuses frequency-enhanced features

    def forward(self, x):
        """x: [B, dim, H, W] -> [B, dim, H, W]."""
        # Depthwise spatial mixing with a residual connection.
        x = x + self.norm1(self.act(self.dwconv(x)))  # [B, dim, H, W]
        x = self.norm2(self.act(self.proj1(x)))       # [B, dim//2, H, W]

        # Spatial context branch.
        ctx = self.norm3(self.act(self.ctx_conv(x)))  # [B, dim//2, H, W]

        # Feature refinement branch, fused with the context branch.
        enh_x = self.enhance(x)                                    # [B, dim//2, H, W]
        x = self.act(self.proj2(torch.cat([ctx, enh_x], dim=1)))   # [B, dim, H, W]

        # Frequency-domain enhancement, fused residually.
        # (Removed debug print() calls that executed on every forward pass.)
        freq_enhanced = self.freq_enhance(x)                # [B, dim, H, W]
        return self.act(self.proj_fusion(x + freq_enhanced))  # [B, dim, H, W]


class Block(nn.Module):
    """Pre-norm transformer-style block: AFE attention then a conv MLP,
    each wrapped in a residual connection with stochastic depth."""

    def __init__(self, dim, drop_path=0.1, expan_ratio=4,
                 kernel_size=3, use_dilated_mlp=False):
        super().__init__()

        self.layer_norm1 = LayerNorm(dim, eps=1e-6, data_format="channels_first")
        self.layer_norm2 = LayerNorm(dim, eps=1e-6, data_format="channels_first")

        # Pick the MLP variant; module creation order is kept stable.
        mlp_cls = AtrousMLP if use_dilated_mlp else MLP
        self.mlp = mlp_cls(dim=dim, mlp_ratio=expan_ratio)
        self.attn = AFE(dim, kernel_size=kernel_size)

        self.drop_path_1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.drop_path_2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        # Attention sub-block with residual + stochastic depth.
        residual = x
        x = self.drop_path_1(self.attn(self.layer_norm1(x)))
        x = x + residual

        # MLP sub-block with residual + stochastic depth.
        residual = x
        x = self.drop_path_2(self.mlp(self.layer_norm2(x)))
        return x + residual


class MLP(nn.Module):
    """Convolutional feed-forward network.

    1x1 expansion -> GELU -> depthwise 3x3 positional residual -> 1x1
    projection back to ``dim`` channels. Spatial size is preserved.
    """

    def __init__(self, dim, mlp_ratio=4, use_dcn=False):
        super().__init__()

        hidden = dim * mlp_ratio
        self.fc1 = nn.Conv2d(dim, hidden, 1)
        self.pos = nn.Conv2d(hidden, hidden, 3, padding=1, groups=hidden)
        self.fc2 = nn.Conv2d(hidden, dim, 1)
        self.act = nn.GELU()

    def forward(self, x):
        feat = self.act(self.fc1(x))          # [B, dim*mlp_ratio, H, W]
        feat = feat + self.act(self.pos(feat))  # depthwise positional refinement
        return self.fc2(feat)                 # [B, dim, H, W]


class AtrousMLP(nn.Module):
    """Conv MLP with parallel standard and dilated (atrous) depthwise branches.

    1x1 expansion to dim*mlp_ratio channels, then two depthwise 3x3 branches
    (dilation 1 and dilation 2) each producing dim*2 channels; their
    concatenation (dim*4 channels) is projected back to dim.

    NOTE(review): mlp_ratio must be even so that dim*mlp_ratio is divisible
    by the depthwise group count (dim*2).
    """

    def __init__(self, dim, mlp_ratio=4):
        super().__init__()

        self.fc1 = nn.Conv2d(dim, dim * mlp_ratio, 1)
        self.pos1 = nn.Conv2d(dim * mlp_ratio, dim * 2, 3, padding=1, groups=dim * 2)
        self.pos2 = nn.Conv2d(dim * mlp_ratio, dim * 2, 3, padding=2, dilation=2, groups=dim * 2)
        # BUGFIX: the two branches concatenate to dim*4 channels, but the
        # original declared fc2 with dim*mlp_ratio*2 input channels, which
        # raises a channel-mismatch RuntimeError for any mlp_ratio != 2
        # (including the default mlp_ratio=4).
        self.fc2 = nn.Conv2d(dim * 4, dim, 1)
        self.act = nn.GELU()

    def forward(self, x):
        """x: [B, dim, H, W] -> [B, dim, H, W]."""
        x = self.act(self.fc1(x))    # [B, dim*mlp_ratio, H, W]
        x1 = self.act(self.pos1(x))  # [B, dim*2, H, W]
        x2 = self.act(self.pos2(x))  # [B, dim*2, H, W] (dilated branch)
        return self.fc2(torch.cat([x1, x2], dim=1))  # [B, dim, H, W]


# 测试代码
# Smoke test: run a random tensor through a standalone AFE block and check
# that the spatial/channel dimensions are preserved.
if __name__ == '__main__':
    x = torch.randn(4, 32, 480, 640)  # renamed from `input`, which shadowed the builtin
    afe_block = AFE(dim=32)
    output = afe_block(x)
    print("Input shape:", x.shape)
    print("Output shape:", output.shape)
