# Simple way to use the model: English -> Romanian translation with
# sanjeev498/en-to-romanian (a seq2seq model on the Hugging Face hub).
#
# Fixes over the original snippet:
#   - adds the missing `transformers` / `torch` imports (original raised NameError)
#   - picks the device at runtime instead of hard-coding "cuda"
#   - only casts to fp16 on GPU (fp16 inference on CPU is poorly supported)
#   - runs generation under torch.no_grad() in eval mode (no autograd bookkeeping)
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

model_name = "sanjeev498/en-to-romanian"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Use the GPU (with half precision to save memory) when available,
# otherwise fall back to full-precision CPU inference.
device = "cuda" if torch.cuda.is_available() else "cpu"
if device == "cuda":
    model = model.half()
model.to(device)
model.eval()

sentences = ["This is a test"]

# Tokenize the batch (padding=True pads to the longest sentence in the batch)
# and move the tensors to the same device as the model.
inputs = tokenizer(sentences, padding=True, return_tensors="pt").to(device)

# Inference only: disable gradient tracking during generation.
with torch.no_grad():
    translated_tokens = model.generate(**inputs)

# Decode token ids back to text, dropping special tokens (BOS/EOS/pad).
translations = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)