# Load the vocabulary tables
import jieba
import torch

from model import Seq2Seq

# Run on GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def read_vocab(path):
    """Load a vocabulary file: one token per line, in index order.

    Args:
        path: path to a UTF-8 text file containing one vocabulary token
            per line; the line number (0-based) is the token's index.

    Returns:
        list[str]: tokens with their newlines stripped, in file order.
    """
    with open(path, "r", encoding="utf8") as f:
        # Iterate the file directly instead of readlines() + append loop.
        # strip("\n") (not a full strip) so tokens that are themselves
        # whitespace, e.g. " ", survive intact.
        return [line.strip("\n") for line in f]


# Tokenization
def tokenizer(sentence):
    """Segment *sentence* with jieba, drop space tokens, append "<EOS>".

    Args:
        sentence: raw sentence string to segment.

    Returns:
        list[str]: word tokens terminated by the "<EOS>" marker.
    """
    # jieba.cut returns a generator; filter it directly — no need to
    # materialize it with list() first.
    return [tok for tok in jieba.cut(sentence) if tok != " "] + ["<EOS>"]


# Mapping between tokens and vocabulary indices.
def mapping(token_sentence, vocab, reverse=False):
    """Convert between tokens and vocabulary indices.

    Args:
        token_sentence: iterable of tokens (reverse=False) or of integer
            indices (reverse=True).
        vocab: list of vocabulary tokens; list position is the index.
        reverse: False maps token -> index, True maps index -> token.

    Returns:
        list of indices (reverse=False) or list of tokens (reverse=True).

    Raises:
        KeyError: on a token not present in *vocab* (forward direction).
        IndexError: on an index out of range (reverse direction).
    """
    if reverse:
        return [vocab[idx] for idx in token_sentence]
    # Build the token -> index table once: O(1) lookups instead of an
    # O(len(vocab)) list.index scan per token. setdefault keeps the FIRST
    # occurrence, matching list.index semantics for duplicate entries.
    token_to_idx = {}
    for idx, token in enumerate(vocab):
        token_to_idx.setdefault(token, idx)
    return [token_to_idx[token] for token in token_sentence]


class configs():
    """Hyper-parameter bundle shared by training and inference."""

    def __init__(self):
        # Model architecture.
        self.num_layers = 1        # RNN depth
        self.embedding_size = 512  # token embedding width
        self.hidden_size = 1024    # RNN hidden-state width
        # Training / inference setup.
        self.batch_size = 1        # one sentence per batch
        self.optim_l_r = 1e-3      # optimizer learning rate
        self.epochs = 100          # training epochs


config = configs()
# Load the source (English) and target (Chinese) vocabularies.
vocab_en = read_vocab("./data/vocab/vocab_en.txt")
vocab_ch = read_vocab("./data/vocab/vocab_ch.txt")
# Build the model and restore the trained weights.
# NOTE(review): the positional 40 is presumably the maximum decode length —
# confirm against Seq2Seq's constructor signature.
model = Seq2Seq(
    len(vocab_en),
    len(vocab_ch),
    config.embedding_size,
    config.hidden_size,
    config.num_layers,
    config.batch_size,
    40,
    vocab_ch,
)
# Load the checkpoint onto the selected device and switch to eval mode.
model.load_state_dict(torch.load("./models/seq2seq_model.pth", map_location=torch.device(device)))
model.eval()

# Sentence to translate: tokenize, map to indices, reshape to (1, seq_len).
input_sentence = "Tom has been on the wanted list for one year."
input_sentence = torch.tensor(mapping(tokenizer(input_sentence), vocab_en)).view(1, -1)
print(input_sentence)
output, hidden = model(input_sentence)
# Greedy decode: argmax token id at every time step (dim 2 is the vocab axis).
_, predicted = torch.max(output.data, 2)
# Map ids back to Chinese tokens; 0-dim tensors act as list indices here.
predict = mapping(predicted[0], vocab_ch, reverse=True)
print("预测：", end="")  # "预测" means "prediction"
for p in predict:
    if p == "<EOS>": break  # stop at the end-of-sentence marker
    print(p, end="")
print()