from config import *
from util import *
from model import make_model,get_padding_mask
import torch
from torch.nn.utils.rnn import pad_sequence

if __name__ == '__main__':
    # Translation demo: load vocabularies + trained Transformer checkpoint,
    # encode a small batch of English sentences, and greedy-decode to Chinese.

    # Vocabularies for source (English) and target (Chinese).
    en_id2vocab, en_vocab2id = get_vocab("en")
    zh_id2vocab, zh_vocab2id = get_vocab("zh")

    SRC_VOCAB_SIZE = len(en_id2vocab)
    TGT_VOCAB_SIZE = len(zh_id2vocab)

    # NOTE(review): hard-coded absolute checkpoint path — consider moving
    # this into config alongside the other hyperparameters.
    MODEL_PATH = "/root/project/Code/sshcode/Transformer/Transformer_model/best.pt"

    # Build the model with the config hyperparameters and restore the best weights.
    model = make_model(SRC_VOCAB_SIZE, TGT_VOCAB_SIZE, D_MODEL, N_HEAD, D_FF, N, DROPOUT).to(device)
    model.load_state_dict(torch.load(MODEL_PATH, map_location=device))

    model.eval()
    texts = [
        "But the concern goes beyond Washington: many ordinary citizens in the US and elsewhere genuinely fear the consequences of a Trump administration.",
        "Today, the British economy continues to grow.",
        "But people who leave a country have not disappeared.",
    ]

    # Tokenize each sentence, map tokens to ids (lower-cased; OOV -> UNK_ID),
    # wrap with SOS/EOS, then right-pad to a rectangular batch.
    batch_src_token = [[en_vocab2id.get(v.lower(), UNK_ID) for v in divided_en(text)]
                       for text in texts]
    batch_src = [torch.LongTensor([SOS_ID] + src + [EOS_ID]) for src in batch_src_token]
    src_x = pad_sequence(batch_src, batch_first=True, padding_value=PAD_ID)
    src_mask = get_padding_mask(src_x, PAD_ID)

    # Greedy decode under no_grad: model.eval() alone does not disable
    # gradient tracking, so without this the decode builds a useless
    # autograd graph and wastes memory.
    with torch.no_grad():
        prob_sent = batch_greedy_decode(model, src_x, src_mask)
    print(prob_sent)