import re
import collections
import torch
from torch import nn
import torch.nn.functional as F

# ---- Build a word-level vocabulary from one toy sentence ----
sequence = "Where is the captain of China?The captain of China is Beijing."

# Strip punctuation, lower-case, and tokenize on whitespace.
sequence = re.sub("[^a-zA-Z]+", " ", sequence).lower().strip()
words = sequence.split()

word_freqs = collections.Counter(words)
# (token, count) pairs sorted by descending frequency; ties keep
# first-seen order (Counter.most_common is a stable sort on counts).
word_dict = word_freqs.most_common()
idx_to_token = [token for token, _ in word_dict]
token_to_idx = {token: index for index, token in enumerate(idx_to_token)}

# vocab_size: number of distinct words in the corpus.
vocab_size = len(idx_to_token)
hidden_size = 20

# Direct indexing (not .get) so an out-of-vocabulary word raises a clear
# KeyError instead of silently producing None and crashing torch.tensor.
data = torch.tensor([token_to_idx[word] for word in words])
inputs = data[:-1].unsqueeze(-1)   # (11, 1): each word predicts its successor
outputs = data[1:].unsqueeze(-1)   # (11, 1)
inputs = F.one_hot(inputs, vocab_size).float()    # (11, 1, 7) == (seq_len, batch_size, vocab_size)
outputs = F.one_hot(outputs, vocab_size).float()  # (11, 1, 7)

# ---- Vanilla RNN parameters (a single Elman cell, trained by hand) ----
# Input-to-hidden weights: project a one-hot word onto the hidden space.
w_xh = torch.randn(vocab_size, hidden_size, requires_grad=True)   # (7, 20)
# Hidden-to-hidden (recurrent) weights.
w_hh = torch.randn(hidden_size, hidden_size, requires_grad=True)  # (20, 20)
# Hidden bias.
b_h = torch.randn(hidden_size, requires_grad=True)                # (20,)

# Hidden-to-output weights and bias: map the hidden state to vocab logits.
w_hq = torch.randn(hidden_size, vocab_size, requires_grad=True)   # (20, 7)
b_q = torch.randn(vocab_size, requires_grad=True)                 # (7,)

# NOTE(review): MSE on one-hot targets does train here, but cross-entropy is
# the conventional loss for next-word prediction — confirm this is intentional.
criterion = nn.MSELoss()
optimizer = torch.optim.Adam([w_xh, w_hh, b_h, w_hq, b_q], lr=0.001)

# ========================= Single-sentence RNN training =========================

epochs = 3000
for epoch in range(epochs):
    optimizer.zero_grad()
    # Reset the hidden state at the start of every full pass over the sentence.
    hidden = torch.zeros((1, hidden_size))
    step_logits = []
    for step_x in inputs:
        # Elman cell: h_t = tanh(x_t @ W_xh + h_{t-1} @ W_hh + b_h)
        hidden = torch.tanh(step_x @ w_xh + hidden @ w_hh + b_h)
        step_logits.append(hidden @ w_hq + b_q)
    predicts = torch.stack(step_logits, dim=0)  # (11, 1, 7), aligned with outputs
    loss = criterion(predicts, outputs)
    loss.backward()

    # Gradient clipping: clamp every gradient element into [-0.5, 0.5]
    # to keep backpropagation-through-time stable.
    nn.utils.clip_grad_value_([w_xh, w_hh, b_h, w_hq, b_q], clip_value=0.5)
    optimizer.step()

    print(f"epoch {epoch + 1} -- loss:{loss.item():.4f}")

# ========================= Single-sentence RNN prediction =========================
test_seq = "Where is the captain"
test_idx = [token_to_idx[word] for word in test_seq.lower().split()]
test_input = torch.tensor(test_idx).unsqueeze(-1)       # (4, 1)
test_input = F.one_hot(test_input, vocab_size).float()  # (4, 1, 7)
# Number of tokens to generate after the prompt.
steps = 4
# Output sequence; the prompt tokens are already known, so start from them.
seq_out = [item for item in test_input]
h = torch.zeros((1, hidden_size))
# Warm up the hidden state on all but the last prompt token.
for x in test_input[:-1]:
    h = torch.tanh(x @ w_xh + h @ w_hh + b_h)
# Autoregressive generation. BUG FIX: the previous version fed the raw output
# logits straight back into the network, but the model was trained on one-hot
# inputs only — re-encode each predicted token as a one-hot vector before
# feeding it back.
x = test_input[-1]
for _ in range(steps):
    h = torch.tanh(x @ w_xh + h @ w_hh + b_h)
    y_hat = h @ w_hq + b_q                                          # (1, 7) logits
    x = F.one_hot(torch.argmax(y_hat, dim=-1), vocab_size).float()  # (1, 7) one-hot
    seq_out.append(x)

print(len(seq_out))  # length of the whole sentence (prompt + generated)
seq_out = torch.stack(seq_out, dim=0)                 # (8, 1, 7)
out_list = torch.argmax(seq_out, dim=-1).squeeze(-1)  # predicted token indices
print([idx_to_token[idx] for idx in out_list])        # decoded text
