import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Optional, Tuple

class MultiScaleCNN(nn.Module):
    """
    Multi-scale CNN module that extracts local features at several granularities.

    Each kernel size produces ``num_filters`` channels; the per-scale outputs
    are concatenated along the channel axis and projected back to ``embed_dim``.
    """
    def __init__(self, embed_dim: int, num_filters: int = 100, filter_sizes: Optional[list] = None):
        """
        Args:
            embed_dim: input/output feature dimension.
            num_filters: number of output channels per convolution scale.
            filter_sizes: kernel sizes, defaults to [1, 3, 5]. Use odd sizes so
                the symmetric padding preserves the sequence length.
        """
        super(MultiScaleCNN, self).__init__()
        # Avoid the shared-mutable-default-argument pitfall: build per instance.
        if filter_sizes is None:
            filter_sizes = [1, 3, 5]
        self.embed_dim = embed_dim
        self.num_filters = num_filters
        self.filter_sizes = filter_sizes

        # One Conv1d per kernel size; padding (fs-1)//2 keeps the sequence
        # length unchanged for odd kernel sizes (even sizes would shorten it).
        self.convs = nn.ModuleList([
            nn.Conv1d(embed_dim, num_filters, kernel_size=fs, padding=(fs - 1) // 2)
            for fs in filter_sizes
        ])

        # One batch-norm layer per scale.
        self.batch_norms = nn.ModuleList([
            nn.BatchNorm1d(num_filters) for _ in filter_sizes
        ])

        # Fuse the concatenated multi-scale features back to embed_dim.
        self.feature_fusion = nn.Linear(num_filters * len(filter_sizes), embed_dim)
        self.dropout = nn.Dropout(0.1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: [batch_size, seq_len, embed_dim]
        Returns:
            cnn_features: [batch_size, seq_len, embed_dim]
        Raises:
            ValueError: if a convolution changes the sequence length (even
                kernel size), which would break the concatenation below.
        """
        # Conv1d expects [batch_size, embed_dim, seq_len].
        x = x.transpose(1, 2)

        conv_outputs = []
        for i, (conv, bn) in enumerate(zip(self.convs, self.batch_norms)):
            # Convolution + batch norm + ReLU per scale.
            conv_out = F.relu(bn(conv(x)))  # [batch_size, num_filters, seq_len]
            # Fail loudly here rather than with a cryptic error at torch.cat.
            if conv_out.size(2) != x.size(2):
                raise ValueError(
                    f"Conv layer {i} with kernel size {self.filter_sizes[i]} "
                    f"changed sequence length ({conv_out.size(2)} != {x.size(2)}); "
                    f"use odd kernel sizes so padding preserves length"
                )
            conv_outputs.append(conv_out)

        # Concatenate all scales along the channel dimension.
        combined_features = torch.cat(conv_outputs, dim=1)  # [batch_size, num_filters*len(filter_sizes), seq_len]

        # Back to [batch_size, seq_len, channels].
        combined_features = combined_features.transpose(1, 2)

        # Project to the original embedding dimension.
        fused_features = self.feature_fusion(combined_features)  # [batch_size, seq_len, embed_dim]
        fused_features = self.dropout(fused_features)

        return fused_features

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al.), added to the input.

    Supports both even and odd ``embed_dim``: the cosine half is filled from a
    correspondingly truncated frequency vector so odd dims no longer error.
    """
    def __init__(self, embed_dim: int, max_len: int = 5000):
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, embed_dim)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency progression; len(div_term) == ceil(embed_dim / 2).
        div_term = torch.exp(torch.arange(0, embed_dim, 2).float() *
                           (-math.log(10000.0) / embed_dim))
        pe[:, 0::2] = torch.sin(position * div_term)
        # The cosine half has only embed_dim // 2 columns; truncate div_term so
        # odd embed_dim does not raise a shape-mismatch error.
        pe[:, 1::2] = torch.cos(position * div_term[: embed_dim // 2])
        pe = pe.unsqueeze(0)  # [1, max_len, embed_dim] for broadcasting over batch
        # Buffer (not a parameter): moves with .to(device), saved in state_dict.
        self.register_buffer('pe', pe)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Add positional encodings to x of shape [batch, seq_len, embed_dim]."""
        seq_len = x.size(1)
        # Only the first seq_len positions are needed.
        return x + self.pe[:, :seq_len]

class CNNEnhancedAttention(nn.Module):
    """
    CNN-enhanced multi-head attention.

    Queries are projected from CNN features; keys and values are projected
    from the original features.
    """
    def __init__(self, embed_dim: int, num_heads: int = 8, dropout: float = 0.1):
        super(CNNEnhancedAttention, self).__init__()
        assert embed_dim % num_heads == 0

        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        # Standard scaled-dot-product factor 1/sqrt(head_dim).
        self.scale = self.head_dim ** -0.5

        # CNN features produce the queries.
        self.query_proj = nn.Linear(embed_dim, embed_dim)
        # Original features produce the keys and values.
        self.key_proj = nn.Linear(embed_dim, embed_dim)
        self.value_proj = nn.Linear(embed_dim, embed_dim)

        self.out_proj = nn.Linear(embed_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self,
                cnn_features: torch.Tensor,
                original_features: torch.Tensor,
                mask: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            cnn_features: [batch_size, seq_len, embed_dim] - CNN features used as Query
            original_features: [batch_size, seq_len, embed_dim] - used as Key and Value
            mask: [batch_size, seq_len] - optional mask; positions equal to 0 are ignored
        Returns:
            output: [batch_size, seq_len, embed_dim] attended features
            attn_weights: [batch_size, num_heads, seq_len, seq_len] post-dropout weights
        """
        batch_size, seq_len, embed_dim = original_features.size()

        # Project and split into heads: [batch_size, num_heads, seq_len, head_dim].
        Q = self.query_proj(cnn_features).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        K = self.key_proj(original_features).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        V = self.value_proj(original_features).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        # Scaled attention scores: [batch_size, num_heads, seq_len, seq_len].
        scores = torch.matmul(Q, K.transpose(-2, -1)) * self.scale

        # Apply the padding mask (if provided), broadcasting over heads/queries.
        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(1)  # [batch_size, 1, 1, seq_len]
            # Out-of-place fill keeps `scores` untouched for autograd safety.
            scores = scores.masked_fill(mask == 0, -1e9)

        # Attention distribution over key positions.
        attn_weights = F.softmax(scores, dim=-1)
        attn_weights = self.dropout(attn_weights)

        # Weighted sum of values: [batch_size, num_heads, seq_len, head_dim].
        attended = torch.matmul(attn_weights, V)

        # Merge heads back into a single embed_dim axis.
        attended = attended.transpose(1, 2).contiguous().view(batch_size, seq_len, embed_dim)

        # Final output projection.
        output = self.out_proj(attended)

        return output, attn_weights

class TransformerBlock(nn.Module):
    """
    Transformer block with CNN-enhanced attention (post-norm residual layout).

    Queries come from CNN features; keys and values come from the running
    hidden states. Attention and feed-forward sub-layers each add a residual
    connection followed by LayerNorm.
    """
    def __init__(self, embed_dim: int, num_heads: int = 8, ff_dim: int = 2048, dropout: float = 0.1):
        super(TransformerBlock, self).__init__()

        self.cnn_enhanced_attention = CNNEnhancedAttention(embed_dim, num_heads, dropout)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)

        # Position-wise feed-forward sub-layer.
        self.ff_network = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(ff_dim, embed_dim),
            nn.Dropout(dropout),
        )

    def forward(self,
                cnn_features: torch.Tensor,
                original_features: torch.Tensor,
                mask: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Args:
            cnn_features: CNN-extracted features (attention queries)
            original_features: original input features (attention keys/values)
            mask: optional attention mask
        Returns:
            output: transformed features
            attn_weights: attention weights from the attention sub-layer
        """
        # Attention sub-layer with residual connection + LayerNorm.
        attended, weights = self.cnn_enhanced_attention(cnn_features, original_features, mask)
        normed = self.norm1(original_features + attended)

        # Feed-forward sub-layer with residual connection + LayerNorm.
        return self.norm2(normed + self.ff_network(normed)), weights

class CNNEnhancedTransformer(nn.Module):
    """
    CNN-enhanced Transformer for sequence classification.

    Token embeddings (plus sinusoidal positions) feed a multi-scale CNN; the
    CNN features supply attention queries in every Transformer block while the
    running hidden states supply keys and values. Pooled output is classified
    by a two-layer head.
    """
    def __init__(self,
                 vocab_size: int,
                 embed_dim: int = 768,
                 num_heads: int = 8,
                 num_layers: int = 6,
                 ff_dim: int = 2048,
                 max_len: int = 512,
                 num_classes: int = 2,
                 dropout: float = 0.1,
                 cnn_filters: int = 100,
                 filter_sizes: Optional[list] = None):
        super(CNNEnhancedTransformer, self).__init__()
        # Avoid the shared-mutable-default-argument pitfall: build per instance.
        if filter_sizes is None:
            filter_sizes = [1, 3, 5]

        self.embed_dim = embed_dim
        self.num_classes = num_classes

        # Token embedding + sinusoidal positional encoding.
        self.token_embedding = nn.Embedding(vocab_size, embed_dim)
        self.pos_encoding = PositionalEncoding(embed_dim, max_len)

        # Multi-scale CNN feature extractor.
        self.cnn_extractor = MultiScaleCNN(embed_dim, cnn_filters, filter_sizes)

        # Stack of CNN-enhanced Transformer blocks.
        self.transformer_blocks = nn.ModuleList([
            TransformerBlock(embed_dim, num_heads, ff_dim, dropout)
            for _ in range(num_layers)
        ])

        # Two-layer classification head.
        self.classifier = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(embed_dim, embed_dim // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(embed_dim // 2, num_classes)
        )

        self.dropout = nn.Dropout(dropout)

        # Initialize weights.
        self._init_weights()

    def _init_weights(self):
        """Xavier-uniform for Linear weights, zeros for biases, N(0, 0.02) for embeddings."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
            elif isinstance(module, nn.Embedding):
                nn.init.normal_(module.weight, std=0.02)

    def forward(self,
                input_ids: torch.Tensor,
                attention_mask: Optional[torch.Tensor] = None) -> dict:
        """
        Args:
            input_ids: [batch_size, seq_len]
            attention_mask: [batch_size, seq_len]; positions equal to 0 are padding
        Returns:
            dict containing logits, attention_weights, hidden_states, cnn_features
        """
        # Token embedding + positional encoding.
        embedded = self.token_embedding(input_ids)  # [batch_size, seq_len, embed_dim]
        embedded = self.pos_encoding(embedded)
        embedded = self.dropout(embedded)

        # CNN feature extraction (computed once, reused as queries in every layer).
        cnn_features = self.cnn_extractor(embedded)  # [batch_size, seq_len, embed_dim]

        # Keep the original features as the initial hidden states.
        hidden_states = embedded
        all_attention_weights = []

        # Run through the Transformer stack.
        for transformer_block in self.transformer_blocks:
            hidden_states, attn_weights = transformer_block(cnn_features, hidden_states, attention_mask)
            all_attention_weights.append(attn_weights)

        # Pooling: masked mean when a mask is given, plain mean otherwise.
        if attention_mask is not None:
            mask_expanded = attention_mask.unsqueeze(-1).float()
            # Clamp the denominator so an all-padding row cannot divide by zero.
            pooled_output = (hidden_states * mask_expanded).sum(dim=1) / mask_expanded.sum(dim=1).clamp(min=1e-9)
        else:
            pooled_output = hidden_states.mean(dim=1)

        # Classification.
        logits = self.classifier(pooled_output)  # [batch_size, num_classes]

        return {
            'logits': logits,
            'hidden_states': hidden_states,
            'attention_weights': all_attention_weights,
            'cnn_features': cnn_features
        }

# Helper: build models in a few preset sizes.
def create_cet_model(model_config: str = 'base', num_classes: int = 2, vocab_size: int = 30522):
    """
    Build a CET model in one of several preset configurations.

    Args:
        model_config: 'small', 'base', or 'large' (unknown names fall back to 'base')
        num_classes: number of classification labels
        vocab_size: vocabulary size

    Returns:
        CNNEnhancedTransformer model
    """
    # Preset hyper-parameters keyed by configuration name.
    presets = {
        'small': dict(embed_dim=256, num_heads=4, num_layers=4, ff_dim=1024, cnn_filters=64),
        'base': dict(embed_dim=768, num_heads=8, num_layers=6, ff_dim=2048, cnn_filters=100),
        'large': dict(embed_dim=1024, num_heads=16, num_layers=12, ff_dim=4096, cnn_filters=128),
    }

    chosen = presets.get(model_config, presets['base'])

    return CNNEnhancedTransformer(
        vocab_size=vocab_size,
        num_classes=num_classes,
        embed_dim=chosen['embed_dim'],
        num_heads=chosen['num_heads'],
        num_layers=chosen['num_layers'],
        ff_dim=chosen['ff_dim'],
        cnn_filters=chosen['cnn_filters'],
    )

# Model parameter statistics.
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total

# Smoke test.
def _smoke_test() -> None:
    """Build a base-sized model and run one forward pass as a sanity check."""
    model = create_cet_model('base', num_classes=2, vocab_size=30522)
    print(f"Model parameters: {count_parameters(model):,}")

    # Random batch for the forward pass.
    batch_size, seq_len = 4, 128
    input_ids = torch.randint(0, 30522, (batch_size, seq_len))
    attention_mask = torch.ones(batch_size, seq_len)

    with torch.no_grad():
        outputs = model(input_ids, attention_mask)

    print(f"Output logits shape: {outputs['logits'].shape}")
    print(f"Hidden states shape: {outputs['hidden_states'].shape}")
    print(f"Number of attention layers: {len(outputs['attention_weights'])}")


if __name__ == "__main__":
    _smoke_test()
