"""
Created on: 2025-01-14
Author: lihz
https://www.runoob.com/pytorch/pytorch-recurrent-neural-network.html
"""

import torch
from torch import nn
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np

# Dataset: character-sequence prediction ("hello" -> "elloh").
# Deduplicate the vocabulary: the original list('hello') kept 'l' twice,
# so char_2_idx silently mapped 'l' to the later index while two one-hot
# dimensions both decoded to 'l' — one output class was dead weight.
char_set = sorted(set('hello'))
char_2_idx = {w: i for i, w in enumerate(char_set)}
idx_2_char = {i: w for i, w in enumerate(char_set)}

# Data preparation: encode input/target strings as vocabulary indices.
input_str = 'hello'
target_str = 'elloh'
input_data = [char_2_idx[c] for c in input_str]
target_data = [char_2_idx[c] for c in target_str]

# One-hot encode the input: shape (seq_len, vocab_size).
input_one_hot = np.eye(len(char_set))[input_data]

# Convert to PyTorch tensors (float one-hots for the RNN input,
# long class indices for CrossEntropyLoss targets).
input_tensor = torch.tensor(input_one_hot, dtype=torch.float32)
target_tensor = torch.tensor(target_data, dtype=torch.long)

# Model hyperparameters — all sizes derive from the vocabulary.
input_size = len(char_set)    # one-hot width
hidden_size = 8               # hidden-state dimension per time step
output_size = len(char_set)   # one logit per vocabulary character
num_epochs = 1000
learning_rate = 0.01

# 定义RNN网络模型
class RNN(nn.Module):
    """Single-layer Elman RNN followed by a linear read-out.

    Maps an input sequence of shape (batch, seq_len, input_size) to
    per-time-step logits of shape (batch, seq_len, output_size),
    alongside the final hidden state.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # Width of the hidden-state vector produced at each time step.
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden):
        # Run the recurrence, then project every step's hidden state
        # to vocabulary logits with the shared linear layer.
        rnn_out, hidden = self.rnn(x, hidden)
        logits = self.fc(rnn_out)
        return logits, hidden

# Instantiate the model with sizes derived from the vocabulary above.
model = RNN(input_size, hidden_size, output_size)

# Loss function and optimizer. CrossEntropyLoss expects raw logits of
# shape (N, C) and integer class targets of shape (N,).
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop. NOTE(review): the hidden state is deliberately carried
# across epochs even though every epoch re-feeds the same full sequence —
# kept as-is from the tutorial, but resetting `hidden = None` per epoch
# would be the more conventional choice; confirm intent.
loss_list = []
hidden = None
for epoch in range(num_epochs):
    # Forward pass — call the module (not .forward()) so nn.Module
    # hooks run as PyTorch intends.
    output, hidden = model(input_tensor.unsqueeze(0), hidden)
    # Detach to truncate back-propagation at the epoch boundary.
    # Without detach(), the hidden state would keep the previous
    # epochs' computation graphs alive, so backward() would try to
    # traverse the whole history — slow, memory-hungry, and it errors
    # once an earlier graph has been freed.
    hidden = hidden.detach()

    # Loss over all time steps: flatten (1, seq_len, C) -> (seq_len, C)
    # to match CrossEntropyLoss's (N, C) logits / (N,) targets contract.
    loss = criterion(output.reshape(-1, output_size), target_tensor)
    loss_list.append(loss.item())

    # Backward pass and parameter update. A single zero_grad() suffices;
    # the original redundantly called both model.zero_grad() and
    # optimizer.zero_grad() on the same parameters.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 50 == 0:
        print(f"Epoch: {epoch+1}/{num_epochs}, Loss: {loss.item()}")

# Inference: run the trained model once over the input sequence and
# decode the argmax class at every time step.
with torch.no_grad():
    hidden = None
    # Call the module rather than .forward() directly so hooks run.
    output, hidden = model(input_tensor.unsqueeze(0), hidden)
    # (1, seq_len, C) -> argmax over the class dim -> (seq_len,) indices.
    predicted = torch.argmax(output, dim=2).squeeze().numpy()

    print("Input sequence: ", ''.join([idx_2_char[i] for i in input_data]))
    print("Predicted sequence: ", ''.join([idx_2_char[i] for i in predicted]))

# Visualize the per-epoch training loss and write the figure to disk.
epochs = range(1, num_epochs + 1)
plt.plot(epochs, loss_list, label="Training Loss")
plt.title("RNN Training Loss Over Epochs")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
# plt.show()  # headless-friendly: save the figure instead of displaying it
plt.savefig('c-rnn_loss.png')