
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

import time

# Target device for inference: set to 'cuda' to run on GPU, 'cpu' otherwise.
device = 'cpu' #cuda or 'cpu' for translate on cpu

# Load the English->Chinese MarianMT model from a local checkpoint directory.
# NOTE: this runs at import time, so importing this module triggers the
# (potentially slow) model load as a side effect.
model = AutoModelForSeq2SeqLM.from_pretrained("./models/opus-mt-en-zh")
model.to(device)

# Matching tokenizer, loaded from the same local checkpoint directory.
tokenizer = AutoTokenizer.from_pretrained("./models/opus-mt-en-zh")

def getModelAndTokenizer():
    """Return the module-level ``(model, tokenizer)`` pair loaded at import time.

    Provides callers a single accessor so they need not import the
    globals directly.
    """
    return (model, tokenizer)

if __name__ == "__main__":
    src_text = "this is an example"

    input_ids = tokenizer(src_text, return_tensors="pt")

    generated_tokens = model.generate(**input_ids.to(device))

    start = time.time()
    result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    end = time.time()
    print(end-start,' s')
    print(result)