from torch import nn
import torch

CONST_INT_INPUT_SIZE = 5 # per-timestep input vector: [x, y, theta, lstim, rstim]
CONST_INT_OUTPUT_SIZE = 3 # predicted state: [x, y, theta]
CONST_INT_SEQ_LEN = 5 # assume the next state depends only on the last 5 steps of history
CONST_BOOL_BATCH_FIRST = True # align with the (batch, seq, feature) output of torch.utils.data.DataLoader

INT_HIDDEN_SIZE = 768 # substantially enlarged hidden size
INT_NUM_LAYERS = 4 # increased LSTM depth

class LSTM(nn.Module):
    """Sequence regressor predicting the next pose [x, y, theta].

    Consumes a window of timesteps, each a CONST_INT_INPUT_SIZE-dim vector
    [x, y, theta, lstim, rstim], shaped (batch, seq, features) when
    CONST_BOOL_BATCH_FIRST is True — assumed to match the training
    DataLoader; TODO confirm.

    Pipeline: per-timestep feature extraction -> bidirectional LSTM ->
    deep unidirectional LSTM stack -> attention pooling combined with the
    last timestep -> fully connected head with a skip connection.
    """

    def __init__(self, dropout=0.25):
        """Build the network.

        Args:
            dropout (float): dropout probability used throughout the model.
        """
        super(LSTM, self).__init__()
        self.dropout_rate = dropout

        # Per-timestep feature extraction applied before any recurrence.
        self.feature_extractor = nn.Sequential(
            nn.Linear(CONST_INT_INPUT_SIZE, INT_HIDDEN_SIZE // 2),
            nn.LayerNorm(INT_HIDDEN_SIZE // 2),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # Bidirectional LSTM to capture context in both time directions.
        # Its output width is 2 * (INT_HIDDEN_SIZE // 2) == INT_HIDDEN_SIZE.
        self.bilstm = nn.LSTM(
            input_size=INT_HIDDEN_SIZE // 2,
            hidden_size=INT_HIDDEN_SIZE // 2,
            num_layers=2,
            batch_first=CONST_BOOL_BATCH_FIRST,
            # This LSTM always has 2 layers, so inter-layer dropout always
            # applies. (The original guard tested INT_NUM_LAYERS — the OTHER
            # stack's depth — which only coincidentally gave the same value.)
            dropout=dropout,
            bidirectional=True
        )

        # Deep unidirectional LSTM stack for further sequence modelling.
        self.lstm = nn.LSTM(
            input_size=INT_HIDDEN_SIZE,
            hidden_size=INT_HIDDEN_SIZE,
            num_layers=INT_NUM_LAYERS + 1,  # one extra layer on top of the base depth
            batch_first=CONST_BOOL_BATCH_FIRST,
            dropout=dropout if INT_NUM_LAYERS > 1 else 0  # inter-layer dropout for multi-layer stacks
        )

        # Additive attention producing per-timestep weights; softmax is over
        # the sequence dimension (dim=1 with batch_first layout).
        self.attention = nn.Sequential(
            nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE),
            nn.Tanh(),
            nn.Linear(INT_HIDDEN_SIZE, 1),
            nn.Softmax(dim=1)
        )

        # Fully connected head. Input width is * 2 because the attention
        # summary is concatenated with the last timestep's LSTM output.
        self.fc_network = nn.Sequential(
            nn.Linear(INT_HIDDEN_SIZE * 2, INT_HIDDEN_SIZE),
            nn.LayerNorm(INT_HIDDEN_SIZE),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE),
            nn.LayerNorm(INT_HIDDEN_SIZE),
            nn.ReLU(),
            nn.Dropout(dropout),

            nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE // 2),
            nn.LayerNorm(INT_HIDDEN_SIZE // 2),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # Projects the last LSTM timestep for the residual/skip path.
        self.skip_connection = nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE // 2)

        # Final regression head mapping to [x, y, theta].
        self.output_layer = nn.Sequential(
            nn.Linear(INT_HIDDEN_SIZE, INT_HIDDEN_SIZE // 2),
            nn.LayerNorm(INT_HIDDEN_SIZE // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(INT_HIDDEN_SIZE // 2, CONST_INT_OUTPUT_SIZE)
        )

        self._init_weights()

    def _init_weights(self):
        """Initialize all parameters in place.

        Recurrent weight matrices get orthogonal init, linear weights get
        Kaiming-normal, 1-D weights (LayerNorm gains) are set to 1.0, and
        all biases to 0.
        """
        for name, param in self.named_parameters():
            if 'weight' in name:
                if len(param.shape) >= 2:  # matrix-shaped weights only
                    if 'lstm' in name or 'bilstm' in name:
                        nn.init.orthogonal_(param)  # orthogonal init for recurrent layers
                    else:
                        nn.init.kaiming_normal_(param)  # Kaiming init for linear layers
                else:
                    # BUGFIX: the only 1-D 'weight' params here are LayerNorm
                    # gains, which must start at 1.0. The previous
                    # normal_(mean=0, std=0.01) init scaled every LayerNorm
                    # output to near zero, crippling the signal at init time.
                    nn.init.constant_(param, 1.0)
            elif 'bias' in name:
                nn.init.constant_(param, 0)

    def forward(self, x):
        """Run a forward pass.

        Args:
            x (Tensor): input of shape (batch, seq_len, CONST_INT_INPUT_SIZE).

        Returns:
            Tensor: predictions of shape (batch, CONST_INT_OUTPUT_SIZE).
        """
        batch_size, seq_len, features = x.size()

        # Apply the feature extractor to every timestep at once.
        # reshape (not view) also tolerates non-contiguous inputs, e.g. slices.
        x_reshaped = x.reshape(batch_size * seq_len, features)
        x_processed = self.feature_extractor(x_reshaped)
        x = x_processed.view(batch_size, seq_len, -1)

        # Bidirectional pass extracts base features...
        bilstm_out, _ = self.bilstm(x)

        # ...then the deep unidirectional stack models the sequence further.
        lstm_out, _ = self.lstm(bilstm_out)

        # Attention-weighted summary over the sequence dimension.
        attention_weights = self.attention(lstm_out)  # (batch, seq, 1)
        attended_out = torch.sum(attention_weights * lstm_out, dim=1)

        # Last timestep's hidden state.
        last_output = lstm_out[:, -1, :]

        # Head input: attention summary concatenated with the last timestep.
        combined = torch.cat([attended_out, last_output], dim=1)

        # Fully connected head.
        fc_out = self.fc_network(combined)

        # Skip connection: project the last timestep and add it to the head output.
        skip_out = self.skip_connection(last_output)
        enhanced = fc_out + skip_out

        # Final projection to [x, y, theta].
        hidden = torch.cat([enhanced, skip_out], dim=1)
        output = self.output_layer(hidden)

        return output

    @staticmethod
    def get_model(model_path):
        """Load a pretrained LSTM model in evaluation mode.

        Args:
            model_path (str): path to the saved state_dict.

        Returns:
            LSTM: model with the pretrained weights loaded, set to eval().
        """
        model = LSTM()
        model.load_state_dict(torch.load(model_path, weights_only=True))
        model.eval()
        return model
    
def get_model():
    """Build and return a fresh, untrained LSTM model with default hyperparameters."""
    model = LSTM()
    return model