import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['AFECM']

class AdaptiveFusionGate(nn.Module):
    """Adaptive fusion gate.

    Runs the input through K parallel depthwise-dilated conv branches
    (one branch per dilation rate) and fuses them with K per-branch
    scalar weights predicted from a global-average-pooled gate, so the
    effective receptive field adapts to the input content.
    """

    def __init__(self, dim, dilations=(1, 2, 3)):
        """
        Args:
            dim: number of input (and output) channels.
            dilations: dilation rate for each branch; one branch is
                built per entry. Default is a tuple rather than a list
                to avoid the shared mutable-default-argument pitfall.
        """
        super().__init__()

        # Each branch: depthwise 3x3 dilated conv + pointwise 1x1 mix.
        # padding == dilation keeps the spatial size unchanged.
        self.branches = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(dim, dim, 3, padding=d, dilation=d, groups=dim),
                nn.Conv2d(dim, dim, 1)
            ) for d in dilations
        ])

        # Predict K softmax-normalized branch weights from the globally
        # pooled input; output shape is [B, K, 1, 1].
        self.weight_gen = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim, len(dilations), 1),  # directly emit K channels
            nn.Softmax(dim=1)
        )

    def forward(self, x):
        """Fuse the branch outputs with input-conditioned weights.

        Args:
            x: 4D tensor [B, C, H, W] with C == dim.

        Returns:
            Tensor of the same shape as ``x``.
        """
        features = [branch(x) for branch in self.branches]

        weights = self.weight_gen(x)  # [B, K, 1, 1]; sums to 1 over K

        # weights[:, i:i+1] stays 4D so it broadcasts over H and W.
        return sum(
            weights[:, i:i + 1] * features[i]
            for i in range(len(features))
        )

class FrequencyAwareBlock(nn.Module):
    """Frequency-aware block.

    Isolates high-frequency content by subtracting a smoothed (low-pass)
    copy of the input, enhances it, scales it by a learned single-channel
    spatial mask, and adds the result back onto the input residually.
    All operations preserve the 4D [B, C, H, W] shape.
    """

    def __init__(self, dim):
        super().__init__()
        # Smoothing path: depthwise 3x3 conv followed by 3x3 average
        # pooling; stride 1 / padding 1 keep the spatial size intact.
        self.low_pass = nn.Sequential(
            nn.Conv2d(dim, dim, 3, padding=1, groups=dim),
            nn.AvgPool2d(3, stride=1, padding=1)
        )
        # Detail path: depthwise conv with tanh squashing on the residual.
        self.high_enhance = nn.Sequential(
            nn.Conv2d(dim, dim, 3, padding=1, groups=dim),
            nn.Tanh()
        )
        # Single-channel sigmoid mask, broadcast across all channels.
        self.gate = nn.Sequential(
            nn.Conv2d(dim, 1, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Return x plus the masked, enhanced high-frequency residual."""
        smoothed = self.low_pass(x)
        detail = self.high_enhance(x - smoothed)
        mask = self.gate(x)  # [B, 1, H, W] -> broadcasts over channels
        return x + detail * mask

class AFECM(nn.Module):
    """AFECM module.

    Pipeline: optional 1x1 channel projection -> ``n`` stacked
    (FrequencyAwareBlock, AdaptiveFusionGate) units -> depthwise 3x3
    fusion conv, with a residual connection from the projected input.
    Spatial dimensions are preserved; channels go from ``inc`` to ``outc``.
    """

    def __init__(self, inc, outc, n=2):
        super().__init__()
        # Project channels only when the count actually changes.
        self.proj = nn.Conv2d(inc, outc, 1) if inc != outc else nn.Identity()

        # Stack n processing units; each preserves [B, outc, H, W].
        self.blocks = nn.Sequential(
            *[nn.Sequential(
                FrequencyAwareBlock(outc),
                AdaptiveFusionGate(outc)
            ) for _ in range(n)]
        )

        # Depthwise 3x3 fusion applied just before the residual add.
        self.fusion = nn.Conv2d(outc, outc, 3, padding=1, groups=outc)

    def forward(self, x):
        """Project to outc channels, run the stacked units, add residual."""
        projected = self.proj(x)
        processed = self.blocks(projected)
        return self.fusion(processed) + projected

# # 严格维度测试
# if __name__ == "__main__":
#     # 测试案例1：标准4D输入
#     model = AFECM(64, 128, n=2)
#     x4d = torch.randn(2, 64, 32, 32)
#     try:
#         out4d = model(x4d)
#         print(f"4D测试通过！输入形状: {tuple(x4d.shape)} → 输出形状: {tuple(out4d.shape)}")
#     except Exception as e:
#         print(f"4D测试失败: {str(e)}")
    
#     # 测试案例2：处理展平后的5D输入
#     x5d = torch.randn(2, 3, 64, 32, 32)  # [B,T,C,H,W]
#     try:
#         # 正确展平方法：合并B和T维度
#         flattened = x5d.view(-1, 64, 32, 32)  # [2*3,64,32,32] = [6,64,32,32]
#         output = model(flattened)
#         print(f"5D展平测试通过！输出形状: {tuple(output.shape)}")
#     except Exception as e:
#         print(f"5D处理错误: {str(e)}")

# # YOLO集成示例
# class YOLO_AFECM(nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.backbone = nn.Sequential(
#             nn.Conv2d(3, 32, 3, 2, 1),
#             AFECM(32, 64),
#             nn.Conv2d(64, 128, 3, 2, 1),
#             AFECM(128, 256, n=3)
#         )
#         self.head = nn.Conv2d(256, 10, 1)
    
#     def forward(self, x):
#         # 确保输入为标准4D格式
#         assert x.ndim == 4, f"输入必须为4D张量，实际维度: {x.shape}"
#         return self.head(self.backbone(x).mean([2,3]))