import torch
import torch.nn as nn


# Define the Encoder
class Encoder(nn.Module):
    """Encoder: runs an LSTM over the input sequence and exposes only its
    final (hidden, cell) states for the decoder to start from."""

    def __init__(self, input_size, hidden_size):
        super(Encoder, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)

    def forward(self, x):
        """Encode a batch of sequences.

        x: (batch_size, seq_len, input_size)

        Returns:
            h_n: final hidden state, (num_layers * num_directions, batch_size, hidden_size)
            c_n: final cell state, same shape as h_n
        The per-step outputs (batch_size, seq_len, hidden_size) are computed
        but only printed for debugging, not returned.
        """
        seq_out, (h_n, c_n) = self.lstm(x)
        # Debug tracing of all three LSTM outputs.
        print("Encoder 输出形状:", seq_out.shape)
        print(seq_out)
        print("Encoder 隐藏状态形状:", h_n.shape)
        print(h_n)
        print("Encoder 细胞状态形状:", c_n.shape)
        print(c_n)
        return h_n, c_n


# Define the Decoder
class Decoder(nn.Module):
    def __init__(self, hidden_size, output_size):
        super(Decoder, self).__init__()
        self.input_size = output_size
        self.lstm = nn.LSTM(self.input_size, hidden_size, batch_first=True)
        # 在 Decoder 类中，output_size 作为 init 方法的参数传入，并被赋值给 self.output_size。
        # 而在创建 nn.Linear 层时，直接使用的是这个传入的 output_size 参数，而不是 self.output_size。
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, hidden, cell, seq_len):
        batch_size = hidden.shape[1]

        input_t = torch.zeros(batch_size, 1, self.input_size)

        outputs = []

        for _ in range(seq_len):
            out, (hidden, cell) = self.lstm(input_t, (hidden, cell))
            print("Decoder 输出形状:", out.shape)
            print(out)
            print("Decoder 隐藏状态形状:", hidden.shape)
            print(hidden)
            print("Decoder 细胞状态形状:", cell.shape)
            print(cell)
            out = self.fc(out)
            outputs.append(out)
            input_t = out
        return torch.cat(outputs, dim=1)


# Seq2Seq wrapper
class Seq2Seq(nn.Module):
    """Glue module: encode the input, then decode a sequence of the same
    length from the encoder's final states."""

    def __init__(self, encoder, decoder):
        super(Seq2Seq, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, x):
        """x: (batch_size, seq_len, input_size) -> decoder output of the same seq_len."""
        final_hidden, final_cell = self.encoder(x)
        target_len = x.shape[1]
        return self.decoder(final_hidden, final_cell, target_len)


if __name__ == '__main__':
    # Toy input: [batch=1, seq_len=4, input_dim=1]
    sample = torch.tensor([[[1.0], [2.0], [3.0], [4.0]]], dtype=torch.float32)

    in_dim = 1
    hid_dim = 8
    out_dim = 1

    # Wire the encoder's final states straight into the decoder.
    model = Seq2Seq(Encoder(in_dim, hid_dim), Decoder(hid_dim, out_dim))

    prediction = model(sample)

    print("输入序列:\n", sample.squeeze(-1))
    print("输出序列形状:", prediction.shape)
    print("输出序列:\n", prediction.detach().squeeze(-1))
