from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, TranslationPipeline, pipeline

# English -> French translation with the Helsinki-NLP OPUS-MT checkpoint,
# built through the high-level pipeline() factory.
model_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
translator = pipeline("translation", model=model_checkpoint)
# print(translator("how old are you"))

########################################################################################

# French -> English translation with the SEBIS legal T5 model, built as an
# explicit TranslationPipeline. AutoModelForSeq2SeqLM replaces the deprecated
# AutoModelWithLMHead, and the instance gets its own name so it no longer
# shadows the imported pipeline() function.
legal_translator = TranslationPipeline(
    model=AutoModelForSeq2SeqLM.from_pretrained("SEBIS/legal_t5_small_trans_fr_en"),
    tokenizer=AutoTokenizer.from_pretrained(
        "SEBIS/legal_t5_small_trans_fr_en",
        do_lower_case=False,
        skip_special_tokens=True,
    ),
    device=0,  # GPU 0; use device=-1 to run on CPU
)

fr_text = "salut, comment vas-tu ?"
translation_result = legal_translator([fr_text], max_length=512)
print(translation_result)

########################################################################################

responseBase = "this is the second test"


def englishtofrench():
    # Translate responseBase once, print it, and return the result.
    result = translator(responseBase)
    print(result)
    return result


englishtofrench()
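

# A minimal companion sketch, not part of the original script: a hypothetical
# frenchtoenglish() helper that mirrors englishtofrench() but sends text
# through the legal_translator pipeline defined above. The function name and
# the default example sentence are assumptions added for illustration.
def frenchtoenglish(text="salut, comment vas-tu ?"):
    # Translate a French string to English with the legal T5 pipeline.
    result = legal_translator([text], max_length=512)
    print(result)
    return result


# frenchtoenglish()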