"""
目标是将hello->ohlol
使用RNNCell
"""
import torch
from torch import nn

# One-hot lookup table: row i is the one-hot vector for character index i.
one_hot_lookup = [
    [1 if col == row else 0 for col in range(4)]
    for row in range(4)
]
# Vocabulary: character index -> character.
idx2char = ['h', 'e', 'l', 'o']

# Width of each input vector (number of features).
input_size = 4
# Dimensionality of the hidden state.
hidden_size = 4
# Samples per batch.
batch_size = 1

# Input sequence "hello" as character indices.
x_data = [0, 1, 2, 2, 3]
# Replace each index with its one-hot row.
x_one_hot = [one_hot_lookup[index] for index in x_data]
# Arrange as (seq_len, batch, input_size) so the RNN cell can be fed one step at a time.
inputs = torch.tensor(x_one_hot, dtype=torch.float32).reshape(-1, batch_size, input_size)

# Target sequence "ohlol" as character indices.
y_data = [3, 0, 2, 3, 2]
# One label per time step, shaped (seq_len, 1).
labels = torch.tensor(y_data, dtype=torch.long).reshape(-1, 1)


class MyRNNCell(nn.Module):
    """Thin wrapper around ``nn.RNNCell`` driving one RNN step per call.

    Args:
        input_size: number of input features per time step.
        hidden_size: dimensionality of the hidden state.
    """

    def __init__(self, input_size, hidden_size, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Remember the hidden width so instances carry their own configuration.
        self.hidden_size = hidden_size
        self.rnn_cell = nn.RNNCell(input_size, hidden_size)

    def forward(self, input, hidden):
        """Advance the RNN one step: return the next hidden state for ``input``."""
        return self.rnn_cell(input, hidden)

    @staticmethod
    def init_hidden(batch_size, hidden_size=4):
        """Return an all-zero initial hidden state of shape ``(batch_size, hidden_size)``.

        Fix: the original read the module-level global ``hidden_size`` at call
        time, so the class raised ``NameError`` when imported elsewhere. The
        explicit parameter (default 4, matching this script's configuration)
        removes that hidden dependency while keeping the existing
        ``MyRNNCell.init_hidden(batch_size)`` call site working.
        """
        return torch.zeros(batch_size, hidden_size)


# Build the model.
model = MyRNNCell(input_size, hidden_size)
# Cross-entropy over the 4-character vocabulary.
criterion = nn.CrossEntropyLoss()
# Adam optimizer over the cell's parameters.
optimizer = torch.optim.Adam(model.parameters(), lr=0.2)

for epoch in range(20):
    # Start every epoch from a fresh zero hidden state (h0).
    hidden = MyRNNCell.init_hidden(batch_size)
    # Clear gradients accumulated by the previous epoch.
    optimizer.zero_grad()
    # Collect the per-step losses; their sum is the sequence loss.
    step_losses = []
    # Unroll the RNN manually, one character at a time.
    for step_input, step_label in zip(inputs, labels):
        # One forward step of the cell.
        hidden = model(step_input, hidden)
        # Loss for this single time step.
        step_losses.append(criterion(hidden, step_label))
        # Greedy decode: the most activated hidden unit is the predicted character.
        prediction = hidden.argmax(dim=1)
        print(idx2char[prediction.item()], end="")
    # Same left-to-right accumulation as summing in the loop.
    loss = sum(step_losses)
    loss.backward()
    optimizer.step()
    print(f" epoch: {epoch}, loss: {loss.item()}")
