from transformer_nmt import *

# Build the vocabularies from the training/dev corpora; the vocab sizes
# fix the embedding and output-projection dimensions of the model.
data = PrepareData(TRAIN_FILE, DEV_FILE)
src_vocab = len(data.en_word_dict)
trg_vocab = len(data.cn_word_dict)

# Instantiate the Transformer with the hyper-parameters used at training time.
model = make_model(src_vocab, trg_vocab, LAYERS, D_MODEL, D_FF, H_NUM, DROPOUT)

# Restore the trained weights; map_location allows a checkpoint saved on a
# GPU machine to be loaded for CPU-only inference.
checkpoint = torch.load('save/last.pt', map_location=torch.device('cpu'))
model.load_state_dict(checkpoint)

sent_en = 'this is a dog.'

# BUG FIX: load_state_dict does not change the module's mode — without
# eval() the model stays in training mode and dropout remains active,
# corrupting the decoded output.
model.eval()

with torch.no_grad():
    # Tokenize and wrap the sentence with sequence-boundary markers.
    sent_en = ["BOS"] + word_tokenize(sent_en) + ["EOS"]
    print(sent_en)
    # Map tokens to vocabulary ids, falling back to UNK for OOV words.
    out_en_ids = [data.en_word_dict.get(word, UNK) for word in sent_en]
    # Shape (1, seq_len): a single-sentence "batch" of token ids.
    # torch.tensor(...) replaces the roundabout torch.from_numpy(np.array(...)).
    out_en_ids = torch.tensor(out_en_ids, dtype=torch.long).unsqueeze(0)
    # Mask positions holding id 0 (assumed to be the PAD index — matches
    # the training-time masking convention; TODO confirm in transformer_nmt).
    src_mask = (out_en_ids != 0).unsqueeze(-2)
    # Autoregressive greedy decoding with the trained model.
    out = greedy_decode(model, out_en_ids, src_mask, max_len=MAX_LENGTH, start_symbol=data.cn_word_dict["BOS"])
    translation = []
    # Walk the decoded ids, skipping index 0 (the "BOS" start symbol).
    for j in range(1, out.size(1)):
        sym = data.cn_index_dict[out[0, j].item()]
        # Stop at the first "EOS"; everything before it is the translation.
        if sym == 'EOS':
            break
        translation.append(sym)
    # Print the decoded target-language sentence.
    print("translation: %s" % " ".join(translation))
