import torch
import torch.nn as nn


class LSTMModel(nn.Module):
    """LSTM sequence model: encodes an input sequence and maps the hidden
    state of the last time step to an ``output_size``-dimensional prediction
    (scalar by default).
    """

    def __init__(self, input_size, hidden_units, num_layers, output_size=1):
        """
        Build the LSTM encoder and the final linear projection.

        :param input_size: number of features per time step
        :param hidden_units: number of hidden units in each LSTM layer
        :param num_layers: number of stacked LSTM layers
        :param output_size: dimension of the final prediction (default 1,
            preserving the original scalar-output behavior)
        """
        super().__init__()
        # batch_first=True: input/output tensors are (batch, seq, feature).
        # int(...) coercions tolerate float-valued hyperparameters
        # (e.g. values coming from a config or a tuner).
        self.lstm = nn.LSTM(
            int(input_size), int(hidden_units), int(num_layers), batch_first=True
        )
        self.fc = nn.Linear(int(hidden_units), int(output_size))

    def forward(self, x):
        """
        Forward pass.

        :param x: input tensor of shape (batch_size, sequence_length, input_size)
        :return: output tensor of shape (batch_size, output_size)
        """
        # LSTM output has shape (batch_size, sequence_length, hidden_units);
        # the hidden/cell states are discarded.
        out, _ = self.lstm(x)
        # Take the last time step only and project it to the output dimension.
        out = self.fc(out[:, -1, :])
        return out


def main():
    """Smoke-test the model: run one forward pass on random data."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Demo hyperparameters; input is (batch_size, sequence_length, input_size).
    batch_size = 32
    sequence_length = 10
    input_size = 5
    hidden_units = 64
    num_layers = 2

    # Random input tensor on the selected device.
    input_tensor = torch.randn(batch_size, sequence_length, input_size).to(device)

    # Instantiate the model and move it to the same device as the input.
    model = LSTMModel(input_size, hidden_units, num_layers).to(device)

    # Forward pass; expected output shape is (batch_size, 1).
    output_tensor = model(input_tensor)


if __name__ == '__main__':
    main()