import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence


class LSTMWithPadding(nn.Module):
    """Unidirectional LSTM that optionally packs variable-length padded sequences.

    Args:
        input_size: number of features per timestep.
        hidden_size: LSTM hidden dimension.
        num_layers: number of stacked LSTM layers.
        batch_size: kept for interface compatibility; the effective batch size
            is read from the input at forward time.
        device: device on which the initial hidden states are created.
        dropout: inter-layer LSTM dropout (active only when num_layers > 1).
    """

    def __init__(self, input_size, hidden_size, num_layers, batch_size, device="cpu", dropout=0.0):
        super().__init__()
        self.device = device
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.batch_size = batch_size

        self.lstm = nn.LSTM(
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            batch_first=True,
            bidirectional=False,
            # nn.LSTM warns about (and ignores) dropout for a single layer.
            dropout=dropout if num_layers > 1 else 0
        )

        # Kept for backward compatibility with existing callers/checkpoints.
        # With the zero-initialized states below these are effective no-ops
        # (dropout of an all-zero tensor is all zeros).
        self.h_dropout = nn.Dropout(dropout / 2)
        self.c_dropout = nn.Dropout(dropout / 2)

    def forward(self, input_seq, lengths=None):
        """Run the LSTM over a padded batch.

        Args:
            input_seq: (batch, seq_len, input_size) padded input batch.
            lengths: optional 1-D tensor of true sequence lengths; when given,
                padded timesteps are excluded via sequence packing.

        Returns:
            (output, h): output is (batch, padded_len, hidden_size) in the
            original batch order; h is the final hidden state of shape
            (num_layers, batch, hidden_size).
        """
        batch_size = input_seq.size(0)

        # BUG FIX: the original drew the initial states from torch.randn,
        # which made every forward pass non-deterministic (even in eval
        # mode). Use the conventional zero initialization instead — this is
        # also what nn.LSTM defaults to when no state is supplied.
        state_shape = (self.num_layers, batch_size, self.hidden_size)
        h_0 = self.h_dropout(torch.zeros(state_shape, device=self.device))
        c_0 = self.c_dropout(torch.zeros(state_shape, device=self.device))

        if lengths is not None:
            # enforce_sorted=False lets PyTorch do the sort/unsort
            # bookkeeping internally: output, h and c all come back in the
            # original batch order. (The previous hand-rolled sort restored
            # the order of `output` and `h` but forgot `c`.)
            packed_input = pack_padded_sequence(
                input_seq, lengths.cpu(), batch_first=True, enforce_sorted=False
            )
            packed_output, (h, c) = self.lstm(packed_input, (h_0, c_0))
            output, _ = pad_packed_sequence(packed_output, batch_first=True)
        else:
            # Regular forward pass over the full (already equal-length) batch.
            output, (h, c) = self.lstm(input_seq, (h_0, c_0))

        return output, h


class LSTMMainWithPadding(nn.Module):
    """LSTM backbone plus a linear head applied to each sequence's last valid output.

    Args:
        input_size: number of features per timestep.
        output_len: output dimension of the final linear layer.
        lstm_hidden: LSTM hidden dimension.
        lstm_layers: number of stacked LSTM layers.
        batch_size: forwarded to LSTMWithPadding (interface compatibility).
        device: device forwarded to LSTMWithPadding for state creation.
        dropout_rate: dropout applied both inside the LSTM and before the head.
    """

    def __init__(self, input_size, output_len, lstm_hidden, lstm_layers, batch_size, device="cpu", dropout_rate=0.2):
        super(LSTMMainWithPadding, self).__init__()
        self.lstm_hidden = lstm_hidden
        self.lstm_layers = lstm_layers
        self.batch_size = batch_size
        self.device = device

        self.lstm = LSTMWithPadding(
            input_size,
            lstm_hidden,
            lstm_layers,
            batch_size,
            device,
            dropout=dropout_rate
        )

        self.dropout = nn.Dropout(dropout_rate)
        self.linear = nn.Linear(lstm_hidden, output_len)
        self.init_weights()

    def init_weights(self):
        """Xavier-initialize the head weights and zero its bias."""
        nn.init.xavier_uniform_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)

    def forward(self, input_seq, lengths=None):
        """Map (batch, seq_len, input_size) to (batch, output_len).

        Args:
            input_seq: padded input batch.
            lengths: optional 1-D tensor of true sequence lengths; when given,
                the head reads each sequence's last *valid* timestep instead of
                the (possibly padded) final position.
        """
        lstm_out, _ = self.lstm(input_seq, lengths)

        if lengths is not None:
            # Gather the last valid output of every sequence.
            # BUG FIX: build/move the index tensors on lstm_out's device —
            # `lengths` typically lives on CPU (packing requires CPU lengths),
            # and indexing a GPU tensor with CPU indices can raise a
            # device-mismatch error.
            batch_idx = torch.arange(lstm_out.size(0), device=lstm_out.device)
            last_idx = (lengths - 1).to(lstm_out.device)
            last_outputs = lstm_out[batch_idx, last_idx]
        else:
            # Equal-length batch: the last timestep is valid for every row.
            last_outputs = lstm_out[:, -1, :]

        # Regularize, then project to the output dimension.
        out = self.dropout(last_outputs)
        out = self.linear(out)

        return out