import torch
import torch.nn as nn
from pytorch_wavelets import DWTForward


class HWD(nn.Module):
    """Haar Wavelet Downsampling.

    Replaces strided conv / pooling with a 1-level Haar DWT (halves the
    spatial size while keeping all information in 4 sub-bands), then fuses
    the stacked sub-bands with a 1x1 conv + BN + ReLU.
    """

    def __init__(self, in_ch, out_ch):
        super(HWD, self).__init__()
        # Single-level Haar decomposition, zero padding at the borders.
        self.wt = DWTForward(J=1, mode='zero', wave='haar')
        # Fuse the 4 concatenated sub-bands (4 * in_ch channels) to out_ch.
        self.conv_bn_relu = nn.Sequential(
            nn.Conv2d(in_ch * 4, out_ch, kernel_size=1, stride=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        # yL is the low-frequency approximation; yH[0] stacks the three
        # high-frequency bands (HL, LH, HH) along dim 2.
        low, high = self.wt(x)
        bands = [low] + [high[0][:, :, i, :, :] for i in range(3)]
        fused = torch.cat(bands, dim=1)
        return self.conv_bn_relu(fused)

from ..modules.conv import Conv
class convHWD(nn.Module):
    """Split-path downsampling.

    The input channels are split in half: one half goes through Haar
    wavelet downsampling (HWD), the other through a stride-s convolution;
    the two downsampled halves are concatenated back together.

    NOTE(review): `in_ch` is unused — the sizing implicitly assumes
    in_ch == out_ch so each split half has out_ch // 2 channels; confirm
    against callers.
    """

    def __init__(self, in_ch, out_ch, k=3, s=2):
        super().__init__()
        half = int(out_ch * 0.5)
        self.hwd = HWD(half, half)                # wavelet branch (spatial /2)
        self.conv3 = Conv(half, half, k=k, s=s)   # conv branch (stride s)

    def forward(self, x):
        a, b = torch.split(x, x.size(1) // 2, dim=1)
        return torch.cat([self.hwd(a), self.conv3(b)], dim=1)

class ConvG7S(nn.Module):
    """Conv-BN-SiLU followed by a half-channel 7x7 refinement.

    After the initial convolution the feature map is split in two along the
    channel dimension; one half passes through a 7x7 conv block and the other
    is left untouched, then the halves are re-concatenated.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        super().__init__()
        pad = kernel_size // 2  # "same" padding for odd kernel sizes
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=pad,
            ),
            nn.BatchNorm2d(out_channels),
            nn.SiLU(),
        )
        half = out_channels // 2
        self.conv7 = nn.Sequential(
            nn.Conv2d(half, half, kernel_size=7, padding=3, stride=1),
            nn.BatchNorm2d(half),
            nn.SiLU(),
        )
        self.identity = nn.Identity()

    def forward(self, x):
        feat = self.conv1(x)
        left, right = torch.split(feat, feat.size(1) // 2, dim=1)
        return torch.cat([self.conv7(left), self.identity(right)], dim=1)

import torch
from torch import nn
from torch.nn.parameter import Parameter
 
class ECA(nn.Module):
    """Efficient Channel Attention module (ECA-Net).

    Derives per-channel gates from globally average-pooled features via a
    1-D convolution across the channel dimension (local cross-channel
    interaction without dimensionality reduction), then rescales the input.

    Args:
        channel: number of input channels (kept for API compatibility; the
            1-D conv itself is channel-count agnostic).
        k_size: kernel size of the 1-D conv over channels.
    """

    def __init__(self, channel, k_size=3):
        super(ECA, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(
            1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # (B, C, H, W) -> (B, C, 1, 1): global spatial descriptor per channel.
        pooled = self.avg_pool(x)
        # Treat channels as a 1-D sequence: (B, C, 1, 1) -> (B, 1, C),
        # convolve, then restore the (B, C, 1, 1) layout.
        gate = pooled.squeeze(-1).transpose(-1, -2)
        gate = self.conv(gate).transpose(-1, -2).unsqueeze(-1)
        # Sigmoid gates in (0, 1), broadcast-multiplied onto the input.
        gate = self.sigmoid(gate)
        return x * gate.expand_as(x)
class EBlock(nn.Module):
    """Efficient Channel Attention block.

    ECA channel attention followed by a pointwise (1x1) MLP, each wrapped
    in a residual connection.
    """

    def __init__(self, dim, mlp_ratio=2.0):
        super().__init__()
        # Channel-attention branch.
        self.eca = ECA(dim)
        # 1x1 expansion / projection MLP; no activation on the projection.
        hidden = int(dim * mlp_ratio)
        self.mlp = nn.Sequential(
            Conv(dim, hidden, 1),
            Conv(hidden, dim, 1, act=False),
        )
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for conv weights, zeros for biases."""
        if isinstance(m, nn.Conv2d):
            nn.init.trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Residual ECA attention, then residual MLP."""
        x = x + self.eca(x)
        return x + self.mlp(x)

class E2C2f(nn.Module):
    """C2f-style aggregation block with selectable inner units.

    Inner units are EBlocks (ECA attention) when ``a2`` is True, otherwise
    C3k blocks. An optional learnable per-channel residual scale (``gamma``)
    adds the block input back onto the output when ``residual`` is set.

    NOTE(review): the residual path adds ``x`` to the output, which requires
    c1 == c2 — confirm against callers.
    NOTE(review): ``area`` is accepted but unused in this block.
    """

    def __init__(self, c1, c2, n=1, a2=True, area=1, residual=False,
                 mlp_ratio=2.0, e=0.5, g=1, shortcut=True):
        super().__init__()
        c_ = int(c2 * e)  # hidden width of the inner units
        self.cv1 = Conv(c1, c_, 3, 1)
        self.cv2 = Conv((1 + n) * c_, c2, 1)
        # Small initial scale so the residual branch starts near identity;
        # None disables the residual path entirely.
        self.gamma = (nn.Parameter(0.01 * torch.ones(c2), requires_grad=True)
                      if residual else None)
        # NOTE(review): C3k is not imported in this chunk — verify it is in
        # scope elsewhere in the file when a2=False is used.
        self.m = nn.ModuleList(
            EBlock(c_, mlp_ratio=mlp_ratio) if a2
            else C3k(c_, c_, 2, shortcut, g)
            for _ in range(n)
        )

    def forward(self, x):
        # Chain the inner units, collecting every intermediate output.
        outs = [self.cv1(x)]
        for block in self.m:
            outs.append(block(outs[-1]))
        merged = self.cv2(torch.cat(outs, 1))
        if self.gamma is not None:
            return x + self.gamma.view(-1, len(self.gamma), 1, 1) * merged
        return merged