"""
使用LSTM长短期记忆模型进行训练
目标是将hello->ohlol
"""

import torch
import torch.nn as nn

# Vocabulary: index -> character lookup table.
idx2char = ['h', 'e', 'l', 'o']
# Encode the input string "hello" as vocabulary indices.
x_data = [idx2char.index(ch) for ch in 'hello']
# Encode the target string "ohlol" as vocabulary indices.
y_data = [idx2char.index(ch) for ch in 'ohlol']

# Model dimensions / hyperparameters.
input_size = len(idx2char)   # vocabulary size
hidden_size = 8              # LSTM hidden state size
embed_size = 10              # embedding vector size
batch_size = 1               # the whole dataset fits in a single batch
num_layers = 1               # number of stacked LSTM layers

# Training tensors: inputs shaped (batch, seq), labels flat (seq,).
inputs = torch.tensor([x_data])
labels = torch.tensor(y_data)


class MyLSTM(nn.Module):
    """Character-level model: embedding -> LSTM -> linear projection to vocab logits.

    The defaults mirror the module-level constants (vocab 4, embed 10,
    hidden 8, 1 layer), so ``MyLSTM()`` behaves exactly as before, while
    the sizes can now be overridden per instance.
    """

    def __init__(self, vocab_size: int = 4, embed_dim: int = 10,
                 hidden_dim: int = 8, n_layers: int = 1) -> None:
        # nn.Module.__init__ takes no extra args; the old *args/**kwargs
        # forwarding would have raised on any argument.
        super().__init__()
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        # Embedding: (batch, seq) int indices -> (batch, seq, embed_dim).
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # batch_first=True: LSTM input/output are (batch, seq, feature).
        self.lstm = nn.LSTM(embed_dim, hidden_dim, n_layers, batch_first=True)
        # Project each hidden state to per-character logits.
        self.fc = nn.Linear(hidden_dim, vocab_size)

    def forward(self, input):
        # (batch, seq) -> (batch, seq, embed_dim)
        embedded = self.embedding(input)
        # Derive the batch size from the input instead of a module-level
        # global, so any batch size works; new_zeros keeps h0/c0 on the
        # same device/dtype as the embedded input.
        batch = input.size(0)
        h0 = embedded.new_zeros(self.n_layers, batch, self.hidden_dim)
        c0 = embedded.new_zeros(self.n_layers, batch, self.hidden_dim)
        # output: (batch, seq, hidden_dim); final (h, c) state is unused.
        output, _ = self.lstm(embedded, (h0, c0))
        # (batch, seq, hidden_dim) -> (batch, seq, vocab) logits.
        output = self.fc(output)
        # Flatten to (batch*seq, vocab) to match CrossEntropyLoss targets.
        return output.view(-1, self.fc.out_features)


# Build the model, the loss, and the optimizer.
model = MyLSTM()
# CrossEntropyLoss expects (N, C) logits against (N,) class indices.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

# Train on the single sequence for 20 epochs.
for epoch in range(20):
    # Reset accumulated gradients before the new step.
    optimizer.zero_grad()
    # Forward pass: (seq, vocab) logits for every time step.
    y_pred = model(inputs)
    loss = criterion(y_pred, labels)
    # Backpropagate and apply the Adam update.
    loss.backward()
    optimizer.step()

    # Decode the per-step argmax prediction back into characters.
    idx = y_pred.argmax(dim=1)
    decoded = ''.join(idx2char[i] for i in idx)
    print(f"epoch: {epoch}, loss: {loss.item()}, {decoded}")
