import torch
import torch.nn as nn
from qtorch.utils.device_utils import get_device, to_device

class LSTMModel(nn.Module):
    """Sequence-to-one LSTM regressor.

    Runs a batch-first sequence through a stacked LSTM and projects the
    hidden state of the *last* time step through a linear layer.

    Args:
        input_size: Number of features per time step.
        hidden_size: Width of the LSTM hidden state.
        output_size: Dimension of the final linear projection.
        num_layers: Number of stacked LSTM layers.
    """

    def __init__(
        self,
        input_size: int = 1,
        hidden_size: int = 64,
        output_size: int = 1,
        num_layers: int = 2,
    ):
        super().__init__()
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,  # input format: [batch, seq_len, features]
        )
        self.fc = nn.Linear(hidden_size, output_size)

        # Move the model to the preferred device (project convention).
        self.to(get_device())

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass.

        Args:
            x: Input tensor of shape [batch, seq_len, input_size].

        Returns:
            Tensor of shape [batch, output_size] — the linear projection
            of the LSTM output at the final time step.
        """
        # Follow the model's actual parameter device rather than a global
        # helper: this stays correct even if the model is moved after
        # __init__ (e.g. model.cpu(), model.cuda(), DataParallel).
        x = x.to(next(self.parameters()).device)
        out, _ = self.lstm(x)          # out: [batch, seq_len, hidden_size]
        out = self.fc(out[:, -1, :])   # keep only the last time step
        return out