"""
使用RNN中的GRU(Gated Recurrent Unit门控制单元)
目标是将hello->ohlol
对字符映射,而且和前面的字符相关
"""
import numpy as np
import torch
import torch.nn as nn

# Vocabulary: index -> character lookup
idx2char = ["h", "e", "l", "o"]
# Input sequence "hello" encoded as vocabulary indices
x_data = [0, 1, 2, 2, 3]
# Target sequence "ohlol" encoded as vocabulary indices
y_data = [3, 0, 2, 3, 2]

# Number of input features: one slot per vocabulary character
input_size = len(idx2char)
# Hidden-state size; chosen equal to input_size so no final linear layer is needed
hidden_size = 4
# Sequences per mini-batch
batch_size = 1
# Number of stacked GRU layers
num_layers = 1

# One-hot lookup table: row i is the one-hot vector for character i
oneHotLookup = [[1.0 if col == row else 0.0 for col in range(input_size)]
                for row in range(input_size)]
# Encode each input index as its one-hot vector
x_OneHot = [oneHotLookup[idx] for idx in x_data]
# Shape the input as (seqLen, batchSize, inputSize) for the GRU
inputs = torch.tensor(x_OneHot, dtype=torch.float32).view(-1, batch_size, input_size)

# Targets as a 1-D tensor of class indices, matching the flattened
# (seqLen * batch, hiddenSize) output produced by forward()
labels = torch.tensor(y_data)

class MyGRU(nn.Module):
    """Thin wrapper around nn.GRU that flattens the output for CrossEntropyLoss.

    Args:
        input_size: number of features per time step (one-hot width here).
        hidden_size: size of the GRU hidden state (also the output width).
        num_layers: number of stacked GRU layers.
        batch_size: kept for backward compatibility; forward() now reads the
            actual batch size from the input tensor instead.
    """

    def __init__(self, input_size, hidden_size, num_layers, batch_size, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.gru = nn.GRU(input_size=input_size, hidden_size=hidden_size, num_layers=num_layers)

    def forward(self, input):
        """Run the GRU over `input` of shape (seqLen, batch, input_size).

        Returns:
            Tensor of shape (seqLen * batch, hidden_size), one row per
            time-step prediction, ready for CrossEntropyLoss.
        """
        # Fix: build h0 with the input's device/dtype and its actual batch
        # size. The old torch.zeros(num_layers, self.batch_size, hidden_size)
        # was always on CPU and broke for GPU inputs or batch sizes other
        # than the one passed to the constructor.
        hidden = input.new_zeros(self.num_layers, input.size(1), self.hidden_size)
        output, _ = self.gru(input, hidden)
        # Flatten time and batch dims so each row is one prediction
        return output.view(-1, self.hidden_size)


# Build the model
model = MyGRU(input_size, hidden_size, num_layers, batch_size)
# Cross-entropy loss over the character classes
criterion = nn.CrossEntropyLoss()
# Adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

for epoch in range(100):
    # Forward pass: (seqLen * batch, hiddenSize) class scores
    output = model(inputs)
    loss = criterion(output, labels)

    # Backward pass and parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Decode the prediction: argmax per row, then map indices back to characters
    idx = output.argmax(dim=1)
    r = ''.join(idx2char[i] for i in idx)
    print(f"epoch: {epoch}, loss: {loss.item()}, {r}")
