from ..models import common
# from models import common
import torch
import torch.nn as nn
import torch.nn.functional as F
# from models.wave import DWT_2D, IDWT_2D
from ..models.wave import DWT_2D, IDWT_2D
from einops import rearrange
import numbers

'''
整体设计思路:
    多尺度特征：通过小波变换和池化操作，在不同尺度提取特征
    特征融合：密集连接增强局部特征复用，Transformer捕捉全局依赖，Fusion模块融合跨尺度特征
    残差与注意力：残差连接缓解梯度问题，通道注意力突出关键特征，提升模型表现力
'''



## Channel Attention (CA) Layer

'''
通道注意力模块
作用：
    对输入特征的通道维度进行加权，突出重要通道特征
实现：
    先通过平均池化将每个通道的空间信息压缩为一个标量（AdaptiveAvgPool2d(1)）。
    再通过两层1x1卷积构成的瓶颈结构生成通道权重
    最后将输入特征与通道权重逐元素相乘实现注意力加权
'''
class CALayer(nn.Module):
    """Channel Attention (CA) layer.

    Squeezes each channel's spatial content to a scalar via global average
    pooling, produces per-channel weights through a 1x1-conv bottleneck with
    a sigmoid, and rescales the input feature map channel-wise.
    """

    def __init__(self, channel, reduction=16):
        super(CALayer, self).__init__()
        # squeeze: global average pooling, (b, c, h, w) -> (b, c, 1, 1)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # excite: channel bottleneck (c -> c/r -> c) followed by sigmoid gate
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            nn.Sigmoid()
        )

    def forward(self, x):
        # broadcast the (b, c, 1, 1) gate over the spatial dimensions
        return x * self.conv_du(self.avg_pool(x))




'''
密集连接模块
作用：
    通过多层卷积的密集残差连接，增强特征的传播和复用
实现：
    包含6层3x3卷积（保持通道数和空间尺寸不变）。
    每一层的输出都与之前所有层的输出（包含输入）相加后再经过GELU激活，形成“累计残差”结构：
    x1=GELU(conv1(x)+x)
    x2=GELU(conv2(x1)+x1+x)
    以此类推
'''
class Dense(nn.Module):
    """Densely connected convolution block.

    Six 3x3 convolutions (channels and spatial size preserved). Each stage
    adds the outputs of *all* previous stages (including the input) before a
    GELU, forming cumulative residuals:

        x1 = GELU(conv1(x) + x)
        x2 = GELU(conv2(x1) + x1 + x)
        ...and so on through conv6.
    """

    def __init__(self, in_channels):
        super(Dense, self).__init__()
        # attribute names conv1..conv6 kept for checkpoint compatibility
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, stride=1)
        self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, stride=1)
        self.conv3 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, stride=1)
        self.conv4 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, stride=1)
        self.conv5 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, stride=1)
        self.conv6 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, stride=1)

        self.gelu = nn.GELU()

    def forward(self, x):
        # feats[i] is the output of stage i (feats[0] is the input itself)
        feats = [x]
        for conv in (self.conv1, self.conv2, self.conv3,
                     self.conv4, self.conv5, self.conv6):
            y = conv(feats[-1])
            # add skip connections newest-first, matching the original
            # left-to-right addition order exactly
            for skip in reversed(feats):
                y = y + skip
            feats.append(self.gelu(y))
        return feats[-1]


'''
残差块
作用：
    通过短连接缓解深层网络的梯度消失问题
实现：
    两层3x3卷积，中间用GELU激活，最后将输出与输入相加（残差连接）
'''
class ResNet(nn.Module):
    """Two-layer residual block: conv-GELU-conv-GELU plus identity shortcut."""

    def __init__(self, in_channels):
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1)

    def forward(self, x):
        residual = x
        h = F.gelu(self.conv1(x))
        h = F.gelu(self.conv2(h))
        # identity shortcut eases gradient flow through deep stacks
        return h + residual







'''
融合模块
作用：
    结合小波变换的多尺度特征与另一输入特征，实现跨尺度融合
核心步骤：
    对输入x1进行二维离散小波变换（DWT），得到低频分量ll和高频分量lh,hl,hh
    高频分量拼接后通过1x1卷积进行降维，经过ResNet处理后再升维，得到处理后的高频特征
    低频分量ll与另一输入x2拼接，经过1x1卷积降维和ResNet处理后，得到融合后的低频特征
    将处理后的高频特征和低频特征拼接，通过逆小波变换（IDWT）恢复到原尺度，输出融合结果
细节：
    若ll与x2的空间尺寸不匹配，通过补零(pad)调整
'''
class Fusion(nn.Module):
    """Wavelet-domain fusion of two feature maps.

    ``x1`` is decomposed by a 2-D DWT into a low-frequency band (ll) and three
    high-frequency bands (lh, hl, hh):

    - high path: concat(lh, hl, hh) -> 1x1 reduce -> ResNet -> 1x1 expand
    - low path:  concat(ll, x2)     -> 1x1 reduce -> ResNet

    The processed low and high bands are re-stacked along channels and an
    inverse DWT restores the original resolution.

    If ``x2``'s spatial size does not match ``ll``'s, ``x2`` is zero-padded.
    """

    def __init__(self, in_channels, wave):
        super(Fusion, self).__init__()
        self.dwt = DWT_2D(wave)
        # high-frequency path: 3c -> c -> ResNet -> 3c
        self.convh1 = nn.Conv2d(in_channels * 3, in_channels, kernel_size=1, stride=1, padding=0, bias=True)
        self.high = ResNet(in_channels)
        self.convh2 = nn.Conv2d(in_channels, in_channels * 3, kernel_size=1, stride=1, padding=0, bias=True)
        # low-frequency path: concat(ll, x2) = 2c -> c -> ResNet
        self.convl = nn.Conv2d(in_channels * 2, in_channels, kernel_size=1, stride=1, padding=0, bias=True)
        self.low = ResNet(in_channels)

        self.idwt = IDWT_2D(wave)

    def forward(self, x1, x2):
        b, c, h, w = x1.shape
        # DWT stacks the four sub-bands along channels: [ll, lh, hl, hh],
        # each with c channels at half the spatial resolution.
        x_dwt = self.dwt(x1)
        ll, lh, hl, hh = x_dwt.split(c, 1)

        # --- high-frequency branch ---
        # lh: horizontal edges, hl: vertical edges, hh: diagonal detail
        high = torch.cat([lh, hl, hh], 1)
        highf = self.convh2(self.high(self.convh1(high)))

        # --- low-frequency branch ---
        b1, c1, h1, w1 = ll.shape
        b2, c2, h2, w2 = x2.shape
        # Zero-pad x2 so its spatial size matches ll.
        # BUGFIX: the original only handled a height mismatch of exactly one
        # row; pad both dimensions by the actual difference (behavior is
        # identical in the original one-row case).
        if h1 != h2 or w1 != w2:
            x2 = F.pad(x2, (0, w1 - w2, h1 - h2, 0), "constant", 0)

        low = self.convl(torch.cat([ll, x2], 1))
        lowf = self.low(low)

        # re-stack [low | high] bands and invert the wavelet transform
        out = torch.cat((lowf, highf), 1)
        return self.idwt(out)

'''
UNet模块
作用:
    构建多尺度特征提取与融合的编码器-解码器结构（对应论文中的Global Feature Extraction Block（低频特征提取）模块）
实现：
    编码器:通过Transformer块处理输入，再平均池化下采样，共3个尺度
    解码器:通过Fusion模块将高层特征(小尺度)与底层特征(大尺度)融合，逐步恢复空间尺寸
    输出:最终特征与输入残差连接(out+x)，并通过补零调整尺寸匹配
'''

class UNet(nn.Module):
    """Three-scale encoder/decoder for low-frequency feature extraction.

    Encoder: a TransformerBlock at each of three scales, with 2x average
    pooling between scales. Decoder: two wavelet Fusion modules merge the
    coarse features back into the finer ones. The output is padded (if
    needed) to the input size and added residually to the input.
    """

    def __init__(self, in_channels, wave):
        super(UNet, self).__init__()
        self.trans1 = TransformerBlock(in_channels, 8, 2.66, False, 'WithBias')
        self.trans2 = TransformerBlock(in_channels, 8, 2.66, False, 'WithBias')
        self.trans3 = TransformerBlock(in_channels, 8, 2.66, False, 'WithBias')
        self.avgpool1 = nn.AvgPool2d(kernel_size=2)
        self.avgpool2 = nn.AvgPool2d(kernel_size=2)

        self.upsample1 = Fusion(in_channels, wave)
        self.upsample2 = Fusion(in_channels, wave)

    def forward(self, x):
        # encoder: transformer refinement at each scale
        x1_r = self.trans1(x)
        x2 = self.avgpool1(x)
        x2_r = self.trans2(x2)
        x3 = self.avgpool2(x2)
        x3_r = self.trans3(x3)

        # decoder: fuse coarse-to-fine with wavelet Fusion modules
        x4 = self.upsample1(x2_r, x3_r)
        out = self.upsample2(x1_r, x4)

        b1, c1, h1, w1 = out.shape
        b2, c2, h2, w2 = x.shape
        # Zero-pad `out` to the input size before the residual add.
        # BUGFIX: the original only handled a height mismatch of exactly one
        # row; pad both dimensions by the actual difference (behavior is
        # identical in the original one-row case).
        if h1 != h2 or w1 != w2:
            out = F.pad(out, (0, w2 - w1, h2 - h1, 0), "constant", 0)

        return out + x




##########################################################################
## Layer Norm

def to_3d(x):
    """Flatten spatial dims to tokens: (b, c, h, w) -> (b, h*w, c).

    Uses native tensor ops instead of einops.rearrange, removing the
    third-party runtime dependency while producing identical values.
    """
    return x.flatten(2).transpose(1, 2)

def to_4d(x, h, w):
    """Unflatten tokens back to a feature map: (b, h*w, c) -> (b, c, h, w).

    Uses native tensor ops instead of einops.rearrange, removing the
    third-party runtime dependency while producing identical values.
    """
    b, hw, c = x.shape
    return x.transpose(1, 2).reshape(b, c, h, w)

class BiasFree_LayerNorm(nn.Module):
    """Bias-free LayerNorm over the last dimension.

    Scales by 1/sqrt(var) and a learned weight only — no mean subtraction
    and no additive bias.
    """

    def __init__(self, normalized_shape):
        super(BiasFree_LayerNorm, self).__init__()
        # accept a bare int or a 1-element shape
        shape = (normalized_shape,) if isinstance(normalized_shape, numbers.Integral) else normalized_shape
        shape = torch.Size(shape)
        assert len(shape) == 1

        self.weight = nn.Parameter(torch.ones(shape))
        self.normalized_shape = shape

    def forward(self, x):
        # biased (population) variance over the last dim; eps for stability
        variance = x.var(-1, keepdim=True, unbiased=False)
        return x / torch.sqrt(variance + 1e-5) * self.weight


class LayerNorm(nn.Module):
    """Channel LayerNorm for (b, c, h, w) maps.

    Flattens the spatial dims to tokens, normalizes over channels with
    either a bias-free or with-bias body, then restores the 4-D layout.
    """

    def __init__(self, dim, LayerNorm_type):
        super(LayerNorm, self).__init__()
        body_cls = BiasFree_LayerNorm if LayerNorm_type == 'BiasFree' else WithBias_LayerNorm
        self.body = body_cls(dim)

    def forward(self, x):
        height, width = x.shape[-2:]
        tokens = to_3d(x)
        return to_4d(self.body(tokens), height, width)


class WithBias_LayerNorm(nn.Module):
    """Standard LayerNorm over the last dimension with learned scale and bias."""

    def __init__(self, normalized_shape):
        super(WithBias_LayerNorm, self).__init__()
        # accept a bare int or a 1-element shape
        shape = (normalized_shape,) if isinstance(normalized_shape, numbers.Integral) else normalized_shape
        shape = torch.Size(shape)
        assert len(shape) == 1

        self.weight = nn.Parameter(torch.ones(shape))
        self.bias = nn.Parameter(torch.zeros(shape))
        self.normalized_shape = shape

    def forward(self, x):
        # center and scale using the biased (population) variance
        mu = x.mean(-1, keepdim=True)
        variance = x.var(-1, keepdim=True, unbiased=False)
        return (x - mu) / torch.sqrt(variance + 1e-5) * self.weight + self.bias

##########################################################################
## Gated-Dconv Feed-Forward Network (GDFN)
class FeedForward(nn.Module):
    """Gated-Dconv Feed-Forward Network (GDFN).

    Projects to 2x the hidden width with a 1x1 conv, mixes locally with a
    depthwise 3x3 conv, splits into a gate and a value branch, gates with
    GELU, and projects back to ``dim``.
    """

    def __init__(self, dim, ffn_expansion_factor, bias):
        super(FeedForward, self).__init__()

        hidden_features = int(dim * ffn_expansion_factor)

        self.project_in = nn.Conv2d(dim, hidden_features * 2, kernel_size=1, bias=bias)

        # depthwise conv (groups == channels) for local mixing
        self.dwconv = nn.Conv2d(hidden_features * 2, hidden_features * 2, kernel_size=3, stride=1, padding=1,
                                groups=hidden_features * 2, bias=bias)

        self.project_out = nn.Conv2d(hidden_features, dim, kernel_size=1, bias=bias)

    def forward(self, x):
        mixed = self.dwconv(self.project_in(x))
        gate, value = mixed.chunk(2, dim=1)
        return self.project_out(F.gelu(gate) * value)


##########################################################################
## Multi-DConv Head Transposed Self-Attention (MDTA)
class Attention(nn.Module):
    """Multi-DConv Head Transposed Self-Attention (MDTA).

    Computes attention across the channel dimension (a c x c attention map
    per head) rather than across spatial positions, which keeps the cost
    linear in h*w. Q/K/V come from a 1x1 conv followed by a depthwise 3x3
    conv; Q and K are L2-normalized and their product is scaled by a learned
    per-head temperature.

    Improvement over the original: the einops.rearrange calls are replaced
    with equivalent native view/reshape ops (adjacent-axis splits/merges),
    removing the third-party runtime dependency with identical results.
    """

    def __init__(self, dim, num_heads, bias):
        super(Attention, self).__init__()
        self.num_heads = num_heads
        # learned per-head scaling of the attention logits
        self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))

        self.qkv = nn.Conv2d(dim, dim * 3, kernel_size=1, bias=bias)
        self.qkv_dwconv = nn.Conv2d(dim * 3, dim * 3, kernel_size=3, stride=1, padding=1, groups=dim * 3, bias=bias)
        self.project_out = nn.Conv2d(dim, dim, kernel_size=1, bias=bias)

    def forward(self, x):
        b, c, h, w = x.shape
        heads = self.num_heads

        qkv = self.qkv_dwconv(self.qkv(x))
        q, k, v = qkv.chunk(3, dim=1)

        # (b, c, h, w) -> (b, heads, c/heads, h*w): split channels into
        # heads and flatten space (pure reshape, same as the old rearrange)
        q = q.view(b, heads, c // heads, h * w)
        k = k.view(b, heads, c // heads, h * w)
        v = v.view(b, heads, c // heads, h * w)

        # L2-normalize along the spatial axis so logits are cosine-like
        q = F.normalize(q, dim=-1)
        k = F.normalize(k, dim=-1)

        # channel-to-channel attention: (c/heads) x (c/heads) per head
        attn = (q @ k.transpose(-2, -1)) * self.temperature
        attn = attn.softmax(dim=-1)

        out = attn @ v

        # (b, heads, c/heads, h*w) -> (b, c, h, w)
        out = out.reshape(b, c, h, w)

        return self.project_out(out)


##########################################################################
'''
Transformer块
作用:
    引入自注意力机制捕捉长距离依赖，结合前馈神经网络增强特征表达
组件:
    LayerNorm:支持带偏置(WithBias)和无偏置(BiasFree)两种模式，对通道维度归一化(先将特征展平为序列，归一化后再恢复空间维度)
    注意力机制(Attention):
        用1x1卷积生成Q、K、V,再通过深度卷积(3x3,group=dim*3)增强局部特征。
        按头拆分后计算缩放点积注意力，最后通过1x1卷积输出
    前馈网络(FeedForward):
        先升维到dim*ffn_expansion_factor,通过深度卷积处理后拆分为两路,用GELU激活后逐元素相乘,最后降维回原通道数
结构:
    x+Attention(LayerNorm(x))->x+FeedForward(LayerNorm(x))(残差+归一化的标准Transformer结构)
'''
class TransformerBlock(nn.Module):
    """Pre-norm transformer block: x + Attn(LN(x)), then x + FFN(LN(x))."""

    def __init__(self, dim, num_heads, ffn_expansion_factor, bias, LayerNorm_type):
        super(TransformerBlock, self).__init__()

        self.norm1 = LayerNorm(dim, LayerNorm_type)
        self.attn = Attention(dim, num_heads, bias)
        self.norm2 = LayerNorm(dim, LayerNorm_type)
        self.ffn = FeedForward(dim, ffn_expansion_factor, bias)

    def forward(self, x):
        # attention sub-layer with residual connection
        attended = self.attn(self.norm1(x))
        x = x + attended
        # feed-forward sub-layer with residual connection
        fed = self.ffn(self.norm2(x))
        return x + fed



'''
高低频处理模块
作用:
    分离输入特征的高频和低频成分，分别处理后融合
实现:
    用平均池化得到低频分量low,通过"输入-上采样低频"得到高频分量high
    低频分量low经过UNet处理后上采样回原尺寸
    高频分量high经Dense块处理
    高低频分量拼接后，经过1x1卷积降维、通道注意力加权、3x3卷积，最后与输入残差连接
'''
class HPB(nn.Module):
    """High/low-frequency processing block.

    Splits the input into a half-resolution low band (average pooling) and
    a full-resolution high residual (input minus upsampled low band). The
    low band goes through a UNet, the high band through a Dense block; the
    two are concatenated, reduced with a 1x1 conv, reweighted by channel
    attention, refined by a 3x3 conv, and added back to the input.
    """

    def __init__(self, n_feats, wave):
        super(HPB, self).__init__()
        self.down = nn.AvgPool2d(kernel_size=2)
        self.dense = Dense(n_feats)
        self.unet = UNet(n_feats, wave)

        # fuse concatenated high/low features: 2c -> c, then refine 3x3
        self.alise1 = nn.Conv2d(2 * n_feats, n_feats, 1, 1, 0)
        self.alise2 = nn.Conv2d(n_feats, n_feats, 3, 1, 1)

        self.att = CALayer(n_feats)

    def forward(self, x):
        # frequency split (same ops as down_sep, reused for clarity)
        low, high = self.down_sep(x)

        # low-frequency path: UNet at half resolution, then upsample back
        low_feat = self.unet(low)
        # high-frequency path: dense block at full resolution
        high_feat = self.dense(high)
        low_up = F.interpolate(low_feat, size=x.size()[-2:], mode='bilinear', align_corners=True)

        fused = torch.cat([high_feat, low_up], dim=1)
        return self.alise2(self.att(self.alise1(fused))) + x

    def down_sep(self, x):
        """Split x into (half-resolution low band, full-resolution high residual)."""
        low = self.down(x)
        upsampled = F.interpolate(low, size=x.size()[-2:], mode='bilinear', align_corners=True)
        return low, x - upsampled


'''
整体网络
作用：
    堆叠多个HPB模块，融合多阶段特征
实现：
    3个HPB模块串联(每个用haar小波)，输出分别为x1,x2,x3
    拼接x1,x2,x3后通过1x1卷积降维，得到最终输出
'''
class UN(nn.Module):
    """Top-level network: three chained HPB stages with multi-stage fusion.

    Each HPB (using the 'haar' wavelet) feeds the next; the three stage
    outputs are concatenated along channels and reduced back to ``n_feats``
    with a 1x1 conv.
    """

    def __init__(self, n_feats):
        super(UN, self).__init__()

        self.encoder1 = HPB(n_feats, 'haar')
        self.encoder2 = HPB(n_feats, 'haar')
        self.encoder3 = HPB(n_feats, 'haar')
        self.conv = nn.Conv2d(n_feats * 3, n_feats, kernel_size=1)

    def forward(self, x):
        # run the stages sequentially, keeping every intermediate output
        stage_outs = []
        feat = x
        for encoder in (self.encoder1, self.encoder2, self.encoder3):
            feat = encoder(feat)
            stage_outs.append(feat)

        # fuse all stage outputs: concat along channels, 1x1 reduce
        return self.conv(torch.cat(stage_outs, 1))



