"""
通用模块
包含所有模型共享的基础组件
"""
import torch
import torch.nn as nn
import torch.nn.functional as F


class ConvBlock(nn.Module):
    """Standard convolution block: Conv2d -> BatchNorm -> activation.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: convolution kernel size.
        stride: convolution stride.
        padding: zero-padding added to both sides of the input.
        activation: apply ReLU after the norm when True, otherwise identity.
        use_bn: insert BatchNorm2d after the convolution when True.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1,
                 activation=True, use_bn=True):
        super().__init__()

        # The conv bias is redundant when BatchNorm immediately follows it,
        # so it is only enabled when BN is disabled.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,
                              bias=not use_bn)
        self.bn = nn.BatchNorm2d(out_channels) if use_bn else nn.Identity()
        self.act = nn.ReLU(inplace=True) if activation else nn.Identity()

    def forward(self, x):
        """Run conv, norm and activation in sequence.

        Args:
            x: input tensor of shape [B, C_in, H, W].

        Returns:
            Tensor of shape [B, C_out, H', W'].
        """
        return self.act(self.bn(self.conv(x)))


class ResidualBlock(nn.Module):
    """Basic residual block: two 3x3 conv blocks plus a shortcut connection.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        stride: stride of the first convolution (spatial downsampling).
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()

        self.conv1 = ConvBlock(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        # No activation on the second conv: the ReLU is applied after the sum.
        self.conv2 = ConvBlock(out_channels, out_channels, kernel_size=3, stride=1, padding=1,
                               activation=False)

        # 1x1 projection shortcut whenever the identity's shape would not match.
        needs_projection = stride != 1 or in_channels != out_channels
        self.downsample = (
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
            if needs_projection else None
        )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Compute the residual mapping plus the (possibly projected) input.

        Args:
            x: input tensor of shape [B, C_in, H, W].

        Returns:
            Tensor of shape [B, C_out, H', W'].
        """
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        out += shortcut
        return self.relu(out)


class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention block.

    Global average pooling ("squeeze") feeds a two-layer bottleneck MLP
    with a sigmoid gate ("excitation"); the resulting per-channel weights
    rescale the input feature map.

    Args:
        channels: number of input (and output) channels.
        reduction: bottleneck reduction ratio for the MLP.
    """

    def __init__(self, channels, reduction=16):
        super().__init__()

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Rescale each channel of x by its learned attention weight.

        Args:
            x: input tensor of shape [B, C, H, W].

        Returns:
            Tensor of shape [B, C, H, W].
        """
        batch, channels = x.shape[:2]

        # Squeeze: spatial dims collapse to a [B, C] descriptor.
        descriptor = self.avg_pool(x).flatten(1)

        # Excitation: per-channel gate in (0, 1), broadcast back to 4D.
        gate = self.fc(descriptor).reshape(batch, channels, 1, 1)

        # Scale the input channel-wise.
        return x * gate.expand_as(x)


class ChannelAttention(nn.Module):
    """Channel attention using both average- and max-pooled statistics.

    Both pooled descriptors pass through a shared bottleneck MLP
    (expressed as 1x1 convolutions); their sum is squashed by a sigmoid
    to produce per-channel gates.

    Args:
        in_channels: number of input channels.
        reduction: bottleneck reduction ratio.
    """

    def __init__(self, in_channels, reduction=16):
        super().__init__()

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        hidden = in_channels // reduction
        # Shared MLP applied to both pooled descriptors.
        self.fc = nn.Sequential(
            nn.Conv2d(in_channels, hidden, 1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, in_channels, 1, bias=False),
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Rescale x channel-wise by the learned attention gates.

        Args:
            x: input tensor of shape [B, C, H, W].

        Returns:
            Tensor of shape [B, C, H, W].
        """
        pooled = (self.avg_pool(x), self.max_pool(x))
        gate = self.sigmoid(sum(self.fc(p) for p in pooled))
        return x * gate


class SpatialAttention(nn.Module):
    """Spatial attention: gates each location using channel-wise statistics.

    Channel-wise mean and max maps are stacked and convolved into a single
    sigmoid-gated attention map that rescales every spatial position.

    Args:
        kernel_size: size of the convolution applied to the stacked maps.
    """

    def __init__(self, kernel_size=7):
        super().__init__()

        # 'same' padding so the attention map keeps the input resolution.
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Rescale x by a per-location attention gate.

        Args:
            x: input tensor of shape [B, C, H, W].

        Returns:
            Tensor of shape [B, C, H, W].
        """
        mean_map = x.mean(dim=1, keepdim=True)
        max_map = x.max(dim=1, keepdim=True).values
        stacked = torch.cat((mean_map, max_map), dim=1)
        gate = self.sigmoid(self.conv(stacked))
        return x * gate


class CBAM(nn.Module):
    """Convolutional Block Attention Module.

    Refines features by applying channel attention first and spatial
    attention second, as in the CBAM design.

    Args:
        in_channels: number of input channels.
        reduction: reduction ratio for the channel-attention MLP.
        kernel_size: kernel size for the spatial-attention convolution.
    """

    def __init__(self, in_channels, reduction=16, kernel_size=7):
        super().__init__()
        self.channel_attention = ChannelAttention(in_channels, reduction)
        self.spatial_attention = SpatialAttention(kernel_size)

    def forward(self, x):
        """Apply channel then spatial attention; shape is preserved.

        Args:
            x: input tensor of shape [B, C, H, W].

        Returns:
            Tensor of shape [B, C, H, W].
        """
        return self.spatial_attention(self.channel_attention(x))


class FeatureFusion(nn.Module):
    """Fuse feature vectors coming from multiple sources.

    Args:
        feature_dims: list of input feature dimensions, one per source.
        output_dim: dimension of the fused output.
        fusion_type: how to combine the inputs —
            'concat': concatenate along dim 1, then project;
            'add': element-wise sum (all dims must match), then project;
            'attention': softmax-weighted sum with learned per-source
            weights (all dims must match), then project.

    Raises:
        ValueError: if fusion_type is not one of the supported modes.
        AssertionError: if 'add' or 'attention' is requested with
            mismatched feature dimensions.
    """

    def __init__(self, feature_dims, output_dim, fusion_type='concat'):
        super().__init__()

        self.fusion_type = fusion_type

        if fusion_type == 'concat':
            in_dim = sum(feature_dims)
        elif fusion_type == 'add':
            assert len(set(feature_dims)) == 1, \
                "All features must have same dimension for 'add' fusion"
            in_dim = feature_dims[0]
        elif fusion_type == 'attention':
            # Fix: the weighted sum also requires all inputs to share one
            # dimension; previously this was only validated for 'add', so a
            # dim mismatch surfaced later as a cryptic broadcast error.
            assert len(set(feature_dims)) == 1, \
                "All features must have same dimension for 'attention' fusion"
            # One learnable scalar weight per source, normalized by softmax.
            self.attention_weights = nn.Parameter(torch.ones(len(feature_dims)))
            in_dim = feature_dims[0]
        else:
            raise ValueError(f"Unknown fusion type: {fusion_type}")

        # Shared projection head used by every fusion mode.
        self.fc = nn.Sequential(
            nn.Linear(in_dim, output_dim),
            nn.ReLU(inplace=True),
        )

    def forward(self, features):
        """Fuse a list of feature tensors.

        Args:
            features: list of tensors [feat1, feat2, ...], each of shape
                [B, D_i] matching feature_dims.

        Returns:
            Fused tensor of shape [B, output_dim].
        """
        if self.fusion_type == 'concat':
            fused = torch.cat(features, dim=1)
        elif self.fusion_type == 'add':
            fused = sum(features)
        else:  # 'attention' — __init__ rejects every other value
            weights = F.softmax(self.attention_weights, dim=0)
            fused = sum(w * f for w, f in zip(weights, features))
        return self.fc(fused)


if __name__ == "__main__":
    # Quick smoke tests for every shared module defined above.
    print("Testing common modules...")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def _check(module, sample):
        """Move module and input to the device, run forward, report shapes."""
        result = module.to(device)(sample.to(device))
        print(f"   Input: {sample.shape} -> Output: {result.shape}")

    print("\n1. Testing ConvBlock...")
    _check(ConvBlock(64, 128, kernel_size=3, stride=1, padding=1),
           torch.randn(2, 64, 32, 32))

    print("\n2. Testing ResidualBlock...")
    _check(ResidualBlock(64, 128, stride=2), torch.randn(2, 64, 32, 32))

    print("\n3. Testing SEBlock...")
    _check(SEBlock(128, reduction=16), torch.randn(2, 128, 16, 16))

    print("\n4. Testing CBAM...")
    _check(CBAM(128, reduction=16), torch.randn(2, 128, 16, 16))

    print("\n5. Testing FeatureFusion...")
    fusion = FeatureFusion([64, 32, 16], output_dim=128, fusion_type='concat').to(device)
    inputs = [torch.randn(2, d).to(device) for d in (64, 32, 16)]
    out = fusion(inputs)
    print(f"   Inputs: [64, 32, 16] -> Output: {out.shape}")

    print("\n✓ All common modules test passed!")