from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch


def print_hi(name, text="i'm sure you also have had experiences you know of that "):
    """Translate *text* from English to Chinese with the Helsinki-NLP
    MarianMT model and print the decoded result.

    Parameters
    ----------
    name : str
        Unused; kept so existing callers (e.g. the ``__main__`` guard)
        keep working.
    text : str, optional
        English source text to translate.  Defaults to the original
        demo sentence.

    Returns
    -------
    list[str]
        The decoded translation(s), one string per input sentence.

    Note: downloads the model from the Hugging Face hub on first use;
    swap in a local path (e.g. ``"./opus-mt-en-zh"``) for offline runs.
    """
    tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-zh")
    model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-zh")

    # prepare_seq2seq_batch() is deprecated (removed in recent
    # transformers releases); calling the tokenizer directly with
    # return_tensors="pt" yields PyTorch tensors immediately, so the
    # old manual torch.as_tensor() conversions are unnecessary.
    # truncation/max_length enforce the model's 512-token input limit
    # that the original only mentioned in commented-out code.
    batch = tokenizer([text], return_tensors="pt", truncation=True, max_length=512)

    # Inference only — no gradients needed, saves memory and time.
    with torch.no_grad():
        translation = model.generate(**batch)

    result = tokenizer.batch_decode(translation, skip_special_tokens=True)
    print(result)
    return result


# Script entry point: run the demo translation when executed directly.
if __name__ == "__main__":
    print_hi("PyCharm")
