"""
使用pytorch完成字符串的预测（共100分）
"""
import torch as pt
import numpy as np
import matplotlib.pyplot as plt

# Model/training hyperparameters.
HIDDEN_SIZE = 15  # LSTM hidden-state dimensionality
ALPHA = 0.001  # Adam learning rate
ITERS = 600  # number of training iterations

# (1) Data processing
# (1)-a: define the source and target strings
x_char = "how are you  "
y_char = "fine, thank you"

# (1)-b: preprocess the data into word lists


def process_sentence(sent):
    """Tokenize *sent* into a list of words.

    Commas are treated as whitespace, and leading/trailing blanks are
    ignored, e.g. "fine, thank you" -> ["fine", "thank", "you"].
    """
    normalized = sent.replace(',', ' ')
    # str.split() with no separator already discards surrounding
    # whitespace, matching the original strip()-then-split() pair.
    return normalized.split()


# Tokenize both sentences; each yields the same number of words, which
# becomes the sequence length (time steps) fed to the LSTM.
x_sent = process_sentence(x_char)
y_sent = process_sentence(y_char)
n_step = len(x_sent)
# Build the vocabulary from the union of words in both sentences.
# NOTE(review): set iteration order is not guaranteed to be stable, so
# the index assigned to each word can differ between runs.
word_dict_set = set(x_sent) | set(y_sent)
word_dict_list = list(word_dict_set)
idx2word = {i: ch for i, ch in enumerate(word_dict_list)}
word2idx = {ch: i for i, ch in enumerate(word_dict_list)}
n_dict = len(idx2word)
print(idx2word)
print(word2idx)
print('n_dict', n_dict)

# Duplicate each sentence to form a batch of size 2.
x_sent = [x_sent, x_sent]
y_sent = [y_sent, y_sent]
# Map words to integer indices; resulting shape is (batch, n_step).
x_idx = np.int64([[word2idx[word] for word in row] for row in x_sent])
y_idx = np.int64([[word2idx[word] for word in row] for row in y_sent])
print('x_idx', x_idx)
print('y_idx', y_idx)

# One-hot encode by indexing rows of an identity matrix; resulting
# shape is (batch, n_step, n_dict).
x_oh = np.eye(n_dict, dtype=np.int64)[x_idx]
y_oh = np.eye(n_dict, dtype=np.int64)[y_idx]
# NOTE(review): y_oh is printed but never used afterwards — training
# below feeds y_idx (class indices) to CrossEntropyLoss instead.
print('x_oh', x_oh)
print('y_oh', y_oh)


# (2) Model class definition
class LstmModel(pt.nn.Module):
    """Word-level sequence model: an LSTM encoder plus a linear decoder.

    The decoder projects every hidden state back onto the vocabulary,
    so the flattened output can be scored with a cross-entropy loss
    against per-time-step word indices.
    """

    def __init__(self, input_size, hidden_size, num_layers, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        # (2)-a: recurrent core — a (possibly stacked) LSTM consuming
        # batch-first input of shape (batch, seq, input_size).
        self.lstm = pt.nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
        )
        # Decoder from hidden states back to vocabulary-sized logits.
        self.fc = pt.nn.Linear(hidden_size, input_size)

    def forward(self, x):
        # (2)-b: forward pass — run the LSTM, flatten every time step
        # into its own row, then decode each row to logits.
        outputs, _state = self.lstm(x)
        flat = outputs.reshape(-1, self.hidden_size)
        return self.fc(flat)


# (3) Model training and prediction
# (3)-a: create the recurrent neural-network model object
model = LstmModel(n_dict, HIDDEN_SIZE, 2)

# (3)-b: choose an optimizer and a loss function
optim = pt.optim.Adam(model.parameters(), lr=ALPHA)
criterion = pt.nn.CrossEntropyLoss()

# (3)-c: fit on the training data for ITERS iterations
GROUP = int(np.ceil(ITERS / 20))  # report progress roughly 20 times
loss_his = []
# NOTE(review): pt.Tensor(...) produces float32, which the LSTM expects;
# pt.tensor(x_oh, dtype=pt.float32) would be the modern spelling.
x_oh = pt.Tensor(x_oh)
# Flatten targets to (batch * n_step,) class indices, matching the
# (batch * n_step, n_dict) logits the model emits.
y_idx = y_idx.reshape(-1)
y_idx = pt.Tensor(y_idx).long()
for i in range(ITERS):
    model.train(True)
    optim.zero_grad()
    pred = model(x_oh)  # logits, shape (batch * n_step, n_dict)
    loss = criterion(pred, y_idx)
    loss.backward()
    optim.step()
    model.train(False)  # same effect as model.eval()
    loss = loss.detach().numpy()
    loss_his.append(loss)
    if i % GROUP == 0 or i == ITERS - 1:
        # Decode argmax predictions back into words for display.
        pred = pred.argmax(dim=-1)
        pred = pred.reshape(-1, n_step)
        pred = pred.detach().numpy()
        pred_sent = [[''.join(idx2word[idx]) for idx in row] for row in pred]
        print(f'#{i + 1}: loss = {loss}, pred_sentence = {pred_sent}')

# (3)-d: plot the loss-value curve
plt.plot(loss_his)

# (3)-e: predict on the test data and print predictions vs. targets
# (already printed inside the training loop above)

# Finally
plt.show()
