import torch
from torch import nn
class LSTM_v4(nn.Module):
    """Sequence-regression LSTM mapping a pose/stimulus sequence to a pose.

    Input:  ``(batch, seq_len, 5)`` per-step features ``[x, y, theta, lstim, rstim]``
    Output: ``(batch, 3)`` predicted ``[x, y, theta]``

    Architecture: per-timestep MLP feature extractor -> 2-layer bidirectional
    LSTM -> 4-layer unidirectional LSTM -> temporal soft attention combined
    with the last timestep -> deep FC head with a skip connection from the
    last timestep.

    Args:
        dropout: dropout probability used throughout the network.
    """

    def __init__(self, dropout=0.25):
        super().__init__()
        INT_HIDDEN_SIZE = 256        # hidden width of the main LSTM
        INT_NUM_LAYERS = 3           # base depth; the main LSTM uses +1
        INT_BILSTM_LAYERS = 2        # depth of the bidirectional front LSTM
        CONST_INT_INPUT_SIZE = 5     # [x, y, theta, lstim, rstim]
        CONST_INT_OUTPUT_SIZE = 3    # [x, y, theta]
        CONST_BOOL_BATCH_FIRST = True

        self.dropout_rate = dropout

        # Per-timestep feature extraction; every layer acts on the last dim,
        # so forward() can apply it to the 3-D tensor directly.
        self.feature_extractor = nn.Sequential(
            nn.Linear(CONST_INT_INPUT_SIZE, INT_HIDDEN_SIZE // 2),
            nn.LayerNorm(INT_HIDDEN_SIZE // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
        )

        # Bidirectional LSTM to capture forward and backward context.
        # The inter-layer dropout gate must test THIS module's layer count;
        # the original tested INT_NUM_LAYERS (same value today, but a latent
        # bug should the constants ever diverge).
        self.bilstm = nn.LSTM(
            input_size=INT_HIDDEN_SIZE // 2,
            hidden_size=INT_HIDDEN_SIZE // 2,
            num_layers=INT_BILSTM_LAYERS,
            batch_first=CONST_BOOL_BATCH_FIRST,
            dropout=dropout if INT_BILSTM_LAYERS > 1 else 0,
            bidirectional=True,
        )

        # Deep unidirectional LSTM; its input is the biLSTM's concatenated
        # forward+backward output (2 * HIDDEN//2 == HIDDEN).
        main_layers = INT_NUM_LAYERS + 1  # deeper stack for the main LSTM
        self.lstm = nn.LSTM(
            input_size=INT_HIDDEN_SIZE,
            hidden_size=INT_HIDDEN_SIZE,
            num_layers=main_layers,
            batch_first=CONST_BOOL_BATCH_FIRST,
            dropout=dropout if main_layers > 1 else 0,  # only valid for >1 layers
        )

        # Additive attention over timesteps (softmax along the seq dim)
        # to emphasize the most informative steps.
        self.attention = nn.Sequential(
            nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE),
            nn.Tanh(),
            nn.Linear(INT_HIDDEN_SIZE, 1),
            nn.Softmax(dim=1),
        )

        # Deep FC head; input is [attended summary, last timestep] => 2*HIDDEN.
        self.fc_network = nn.Sequential(
            nn.Linear(INT_HIDDEN_SIZE * 2, INT_HIDDEN_SIZE),
            nn.LayerNorm(INT_HIDDEN_SIZE),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE),
            nn.LayerNorm(INT_HIDDEN_SIZE),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE // 2),
            nn.LayerNorm(INT_HIDDEN_SIZE // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
        )

        # Skip connection projecting the last LSTM timestep to HIDDEN//2.
        self.skip_connection = nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE // 2)

        # Output head; input is cat([fc+skip, skip]) => HIDDEN.
        self.output_layer = nn.Sequential(
            nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE // 2),
            nn.LayerNorm(INT_HIDDEN_SIZE // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(INT_HIDDEN_SIZE // 2, CONST_INT_OUTPUT_SIZE),
        )

    def forward(self, x):
        """Predict ``[x, y, theta]`` from a ``(batch, seq_len, 5)`` sequence."""
        # Apply the per-timestep extractor to the 3-D tensor directly:
        # Linear/LayerNorm/ReLU/Dropout all broadcast over leading dims.
        # This replaces the original view()/view() round-trip, which raised
        # RuntimeError for non-contiguous inputs (e.g. transposed views).
        x = self.feature_extractor(x)

        # Bidirectional pass: (batch, seq, HIDDEN//2) -> (batch, seq, HIDDEN)
        bilstm_out, _ = self.bilstm(x)

        # Deep unidirectional pass: (batch, seq, HIDDEN)
        lstm_out, _ = self.lstm(bilstm_out)

        # Attention-weighted sum over timesteps -> (batch, HIDDEN)
        attention_weights = self.attention(lstm_out)
        attended_out = torch.sum(attention_weights * lstm_out, dim=1)

        # Output of the final timestep -> (batch, HIDDEN)
        last_output = lstm_out[:, -1, :]

        # Combine the attention summary with the final state.
        combined = torch.cat([attended_out, last_output], dim=1)
        fc_out = self.fc_network(combined)

        # Residual-style skip from the last timestep into the FC output.
        skip_out = self.skip_connection(last_output)
        enhanced = fc_out + skip_out

        # Final head sees both the enhanced features and the raw skip path.
        hidden = torch.cat([enhanced, skip_out], dim=1)
        return self.output_layer(hidden)
    
def get_model():
    """Factory: build and return a fresh ``LSTM_v4`` with default settings."""
    model = LSTM_v4()
    return model