import torch
import torch.nn as nn

class LSTMModel(nn.Module):
    """Stacked (optionally bidirectional) LSTM followed by a 4-layer MLP head.

    Consumes a batch of sequences and predicts one output vector per sequence
    from the hidden state of the last time step.
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers=2, dropout=0.1, bidirectional=False):
        """
        Args:
            input_size: number of input features per time step
            hidden_size: number of features in the LSTM hidden state
            output_size: number of output features
            num_layers: number of stacked LSTM layers (default: 2)
            dropout: dropout probability between LSTM layers and in the head (default: 0.1)
            bidirectional: whether to use a bidirectional LSTM (default: False)
        """
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional

        # LSTM stack; batch_first so inputs are (batch, seq, features).
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True, dropout=dropout,
                            bidirectional=bidirectional)

        # A bidirectional LSTM emits twice the hidden width; only the first
        # fully connected layer needs to know about that.
        lstm_out_size = hidden_size * (2 if bidirectional else 1)
        self.fc1 = nn.Linear(lstm_out_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, output_size)

        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        # NOTE(review): leaky_relu and tanh are never used in forward();
        # kept as attributes so any external code referencing them still works.
        self.leaky_relu = nn.LeakyReLU(0.1)
        self.tanh = nn.Tanh()

    def forward(self, x):
        """Run the LSTM over *x* and decode the last time step.

        Args:
            x: input of shape (batch_size, sequence_length, input_size);
               a 2D (batch_size, input_size) input is treated as a length-1
               sequence. Non-tensor inputs are converted to float tensors.
        Returns:
            output tensor of shape (batch_size, output_size)
        """
        if not torch.is_tensor(x):
            x = torch.FloatTensor(x)
        # Move the input to wherever the model's weights live. The original
        # code sent it to "cuda:0" whenever CUDA was available, which crashes
        # if the model itself is on the CPU (or a different GPU).
        x = x.to(next(self.parameters()).device)

        if x.dim() == 2:  # (batch, features) -> add a length-1 sequence axis
            x = x.unsqueeze(1)

        # Zero-initialized hidden and cell states.
        num_directions = 2 if self.bidirectional else 1
        state_shape = (self.num_layers * num_directions, x.size(0), self.hidden_size)
        h0 = torch.zeros(state_shape, device=x.device)
        c0 = torch.zeros(state_shape, device=x.device)

        # out: (batch, seq, hidden_size * num_directions)
        out, _ = self.lstm(x, (h0, c0))

        # Decode only the last time step, then pass it through the MLP head.
        out = self.dropout(out[:, -1, :])
        out = self.relu(self.fc1(out))
        out = self.relu(self.dropout(self.fc2(out)))
        out = self.relu(self.dropout(self.fc3(out)))
        out = self.fc4(out)

        return out

    def loss(self, x, x_label):
        """Mean-squared-error between predictions and targets.

        Args:
            x: predicted output tensor
            x_label: ground-truth tensor (converted/moved to x's device)
        Returns:
            scalar MSE loss tensor
        """
        if not torch.is_tensor(x_label):
            x_label = torch.FloatTensor(x_label)
        # Compare on the same device as the predictions instead of assuming
        # "cuda:0" (the prediction already lives on the model's device).
        x_label = x_label.to(x.device)

        # nn.MSELoss convention is (input, target); MSE is symmetric, but use
        # the canonical order for clarity.
        return nn.MSELoss()(x, x_label)

