import torch
from torch import nn
class LSTM_v2(nn.Module):
    """Stacked-LSTM sequence regressor.

    Consumes a batch of sequences of 5-dim feature vectors
    [x, y, theta, lstim, rstim] and predicts a 3-dim output
    [x, y, theta] from the hidden state at the final time step.
    """

    # Architecture hyperparameters (exposed as class attributes so they
    # can be inspected without instantiating the module).
    HIDDEN_SIZE = 64   # hidden units per LSTM layer
    NUM_LAYERS = 2     # number of stacked LSTM layers
    INPUT_SIZE = 5     # per-step features: [x, y, theta, lstim, rstim]
    OUTPUT_SIZE = 3    # predicted values: [x, y, theta]

    def __init__(self) -> None:
        super().__init__()
        self.lstm = nn.LSTM(
            input_size=self.INPUT_SIZE,
            hidden_size=self.HIDDEN_SIZE,
            num_layers=self.NUM_LAYERS,
            batch_first=True,  # inputs are (batch, seq_len, features)
            # Inter-layer dropout only applies between stacked layers;
            # PyTorch warns if dropout > 0 with a single layer.
            dropout=0.2 if self.NUM_LAYERS > 1 else 0,
        )
        self.fc1 = nn.Linear(self.HIDDEN_SIZE, self.HIDDEN_SIZE // 2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)
        self.fc2 = nn.Linear(self.HIDDEN_SIZE // 2, self.OUTPUT_SIZE)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run the model on a batch of sequences.

        Args:
            x: Tensor of shape (batch, seq_len, 5).

        Returns:
            Tensor of shape (batch, 3).
        """
        lstm_out, _ = self.lstm(x)
        # Summarize each sequence by the hidden state of its last time step.
        last_output = lstm_out[:, -1, :]
        h = self.dropout(self.relu(self.fc1(last_output)))
        return self.fc2(h)

def get_model():
    """Factory: construct and return a fresh LSTM_v2 instance."""
    model = LSTM_v2()
    return model