import torch.nn as nn
from .cnn import MultiScaleCNN
from .attention import MultiHeadAttention

class EnhancedCNNLSTMAttention(nn.Module):
    """Sequence model: multi-scale CNN front-end, GRU, then multi-head attention.

    Pipeline: LayerNorm + BatchNorm on the raw features, a multi-scale CNN
    projecting to 96 channels, a single-layer unidirectional GRU with a
    linear residual skip from the normalized input, multi-head attention,
    and a two-layer MLP head that reads the final time step.

    NOTE(review): assumes input is shaped (batch, seq_len, INPUT_DIM) and
    that MultiScaleCNN preserves the sequence length while emitting 96
    channels — confirm against those modules' definitions.
    """

    def __init__(self, config):
        super().__init__()

        # Dual feature normalization: per-timestep LayerNorm followed by
        # a channel-wise BatchNorm1d (applied on the transposed tensor).
        self.input_norm = nn.LayerNorm(config.INPUT_DIM)
        self.feature_norm = nn.BatchNorm1d(config.INPUT_DIM)

        # CNN trunk: multi-scale convolutions -> LayerNorm over the 96
        # output channels -> light dropout.
        self.cnn = nn.Sequential(
            MultiScaleCNN(config, out_channels=32),
            nn.LayerNorm(96),
            nn.Dropout(p=0.2),  # kept low to avoid over-regularizing
        )

        # Linear projection of the normalized input so it can be added
        # to the GRU output as a residual skip connection.
        self.residual = nn.Linear(config.INPUT_DIM, 96)

        # Single-layer unidirectional GRU over the CNN features.
        self.gru = nn.GRU(
            input_size=96,
            hidden_size=96,
            num_layers=1,
            batch_first=True,
            bidirectional=False,
        )

        # Multi-head attention over the residual-augmented GRU states.
        self.attention = MultiHeadAttention(config, num_heads=4)

        # Compact MLP head producing the final prediction.
        self.fc = nn.Sequential(
            nn.Linear(96, 48),
            nn.ReLU(),
            nn.Dropout(p=0.2),
            nn.Linear(48, config.OUTPUT_DIM),
        )

    def forward(self, x):
        # Normalize per time step, then per channel (BatchNorm1d wants
        # the channel dimension second, hence the permutes).
        normed = self.input_norm(x)
        normed = self.feature_norm(normed.permute(0, 2, 1)).permute(0, 2, 1)

        # Skip path: project normalized input into the 96-dim trunk space.
        skip = self.residual(normed)

        # Trunk: CNN features fed through the GRU.
        seq_states, _ = self.gru(self.cnn(normed))

        # Merge the residual skip into the recurrent states.
        seq_states = seq_states + skip

        # Attend over the sequence; the head reads the last time step.
        attended = self.attention(seq_states, normed.size(1))
        return self.fc(attended[:, -1, :])