import re
import collections
import torch
from torch import nn
import torch.nn.functional as F
from backbones.bi_rnn import RNN

# Toy corpus: a single question/answer pair used for masked-word training.
sequence = "Where is the captain of China?The captain of China is Beijing."

# Keep letters only, lowercase everything, and tokenize on whitespace.
sequence = re.sub("[^a-zA-Z]+", " ", sequence).lower().strip()
words = sequence.split()

# <unk> marks unknown tokens; <mask> hides one token from the model.
words_mask = words.copy()
words_mask[5] = "<mask>"

# Vocabulary: the two special tokens first, then words by descending frequency.
word_freqs = collections.Counter(words)
word_dict = word_freqs.most_common()
idx_to_token = ['<unk>', '<mask>'] + [token for token, _ in word_dict]
token_to_idx = {token: idx for idx, token in enumerate(idx_to_token)}

# vocab_size: number of distinct tokens (special tokens included)
vocab_size = len(idx_to_token)
hidden_size = 20

# The masked sequence is the model input; the original sequence is the target.
inputs = torch.tensor([token_to_idx.get(token) for token in words_mask])
outputs = torch.tensor([token_to_idx.get(token) for token in words])  # (12,)

# One-hot encode: (seq_len, batch_size, vocab_size)
inputs = F.one_hot(inputs.unsqueeze(-1), vocab_size).float()  # (12,1,9)

# Build the bidirectional, single-layer RNN (project-local backbone) on CPU.
device = torch.device("cpu")
model = RNN(vocab_size, hidden_size, 1, bidirectional=True, device=device)

# Cross-entropy over vocabulary indices, optimized with Adam.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

# ========================= Single-sentence RNN training =========================

epochs = 1000
for epoch in range(epochs):
    optimizer.zero_grad()

    # Fresh initial hidden state each epoch:
    # (num_layers * num_directions, batch_size, hidden_size)
    hidden = torch.zeros((2, 1, hidden_size))
    predicts, _ = model(inputs, hidden)

    loss = criterion(predicts, outputs)
    loss.backward()

    # Gradient clipping: clamp every gradient element into [-0.5, 0.5]
    # to keep updates stable on this tiny dataset.
    nn.utils.clip_grad_value_(model.parameters(), clip_value=0.5)
    optimizer.step()

    print(f"epoch {epoch + 1} -- loss:{loss.item():.4f}")

# ========================= Single-sentence RNN prediction =========================
test_seq = "Where is the captain of <mask> The captain of <mask> is <mask>"
test_idx = [token_to_idx[word] for word in test_seq.lower().split()]
test_input = torch.tensor(test_idx).unsqueeze(-1)  # (12,1)
test_input = F.one_hot(test_input, vocab_size).float()  # (12,1,9)
model.eval()  # switch to inference mode (affects dropout/batch-norm layers, if any)
h = torch.zeros((2, 1, hidden_size))
# Inference needs no autograd graph: no_grad() skips gradient tracking,
# saving memory and compute without changing the forward-pass results.
with torch.no_grad():
    results, _ = model(test_input, h)  # results (12,9): per-position vocab logits
# Map each position's highest-scoring logit back to its token and print.
print([idx_to_token[index.item()] for index in torch.argmax(results, dim=-1)])
