from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Define the model and tokenizer
model_name = "t5-small"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Provide a text to be converted
text = "Translate this text to French."

# Tokenize the text and generate the output
inputs = tokenizer.encode("translate English to French: " + text, return_tensors="pt")
outputs = model.generate(inputs, max_length=40, num_beams=4, early_stopping=True)
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

# Print the generated text
print("Generated Text:", generated_text)