from transformers import T5ForConditionalGeneration, T5Tokenizer

import time


# Inference device; set to 'cpu' here — change to 'cuda' to run on GPU.
device = 'cpu'

# Hard-coded local checkpoint paths; the small 1024-context variant is active,
# the large variant is kept commented for easy switching.
# model_name = '/home/huangjiayu/wanfang-translation-python/models/t5_translate_en_ru_zh_large_1024'
model_name = '/home/huangjiayu/wanfang-translation-python/models/t5_translate_en_ru_zh_small_1024'

# Load the model and tokenizer once at import time so importers of this
# module share a single in-memory copy (see getModelAndTokenizer below).
# NOTE(review): this performs disk I/O on import — confirm that is intended.
model = T5ForConditionalGeneration.from_pretrained(model_name)
model.to(device)
tokenizer = T5Tokenizer.from_pretrained(model_name)

def getModelAndTokenizer():
    """Return the (model, tokenizer) pair loaded at module import time."""
    return (model, tokenizer)

if __name__ == '__main__':
    # Demo: translate an English sentence to Chinese.
    # (The original comment said "Russian to Chinese", but the prefix requests
    # Chinese output and the source text is English.)
    prefix = 'translate to zh: '
    src_text = prefix + "This is a Chinese statement."

    # Tokenize and report the input length in tokens.
    input_ids = tokenizer(src_text, return_tensors="pt")
    print(len(input_ids['input_ids'][0]))

    # Time the full translation. The original code timed only batch_decode,
    # which excludes model.generate — the overwhelmingly dominant cost — so
    # the printed duration was meaningless as a translation benchmark.
    start = time.time()
    generated_tokens = model.generate(**input_ids.to(device))
    result = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    end = time.time()

    print(end - start, ' s')
    print(result)
