# 1. Character-level data prep for the "hihello" LSTM exercise (PyTorch).
import torch

# (1) Data processing
# Build the vocabulary from the sample string. sorted() makes the
# char<->index mapping deterministic across runs; plain set iteration
# order depends on PYTHONHASHSEED, which made the learned indices differ
# from run to run.
sample = "hihello"
char_set = sorted(set(sample))  # ['e', 'h', 'i', 'l', 'o']
onehot_dim = len(char_set)      # 5 distinct characters

# Inputs are the string minus its last char, targets are the string
# shifted left by one: x = "hihell", y = "ihello".
x = sample[:-1]
y = sample[1:]
seq_len = len(x)                # 6 time steps

# Character <-> integer-id lookup tables.
char_to_int = {ch: idx for idx, ch in enumerate(char_set)}
int_to_char = {idx: ch for idx, ch in enumerate(char_set)}
x = torch.tensor([char_to_int[ch] for ch in x])
y = torch.tensor([char_to_int[ch] for ch in y])
# One-hot encode the inputs and add a batch dimension:
# final shape is (1, seq_len, onehot_dim).
x = torch.nn.functional.one_hot(x, onehot_dim).reshape(-1, seq_len, onehot_dim)
# (2) Model definition
class LSTM(torch.nn.Module):
    """Stacked LSTM followed by a linear head producing per-step logits.

    Args:
        input_dim: size of each one-hot input vector. Defaults to the
            module-level ``onehot_dim`` so existing ``LSTM()`` calls keep
            working unchanged.
        hidden_size: width of the LSTM hidden state (was hard-coded 128).
        num_layers: number of stacked LSTM layers (was hard-coded 2).
    """

    def __init__(self, input_dim=None, hidden_size=128, num_layers=2) -> None:
        super().__init__()
        if input_dim is None:
            # Backward-compatible fallback to the script-level vocab size.
            input_dim = onehot_dim
        self.output_dim = input_dim  # logits dimension, one per character class
        self.lstm = torch.nn.LSTM(input_size=input_dim,
                                  hidden_size=hidden_size,
                                  num_layers=num_layers,
                                  batch_first=True)  # input is (batch, seq, feature)
        self.fc = torch.nn.Linear(hidden_size, input_dim)

    def forward(self, x):
        """Return logits of shape (batch * seq_len, output_dim)."""
        out, _ = self.lstm(x)  # (batch, seq, hidden_size); hidden state discarded
        out = self.fc(out)     # (batch, seq, output_dim)
        # Flatten batch and time so CrossEntropyLoss sees (N, classes).
        # Uses the stored dim instead of the module-level global.
        return out.view(-1, self.output_dim)


# (3) Train the model to map "hihell" -> "ihello".
lstm = LSTM()
loss_fn = torch.nn.CrossEntropyLoss()  # expects raw logits plus integer class ids
op = torch.optim.Adam(lstm.parameters(), lr=0.1)

for epoch in range(200):
    op.zero_grad()
    # one_hot produces int64; the LSTM requires a floating-point input.
    predict = lstm(x.float())
    # predict: (seq_len, vocab) logits, y: (seq_len,) target char ids.
    loss = loss_fn(predict, y)
    loss.backward()
    op.step()
    if epoch % 5 == 0:
        print("epoch: %d, =======loss: %1.3f" % (epoch + 1,
                                                 loss.item()))
        # argmax over the class dimension gives the predicted char id per
        # time step. detach() replaces the deprecated .data attribute, and
        # tolist() avoids the unnecessary numpy round-trip.
        idx = torch.argmax(predict.detach(), 1).tolist()
        # Convert ids back to characters and show the decoded string.
        result_str = [int_to_char[c] for c in idx]

        print("Predicted string: ", ''.join(result_str))
