import gradio as gr
from transformers import pipeline

# English-to-Malayalam translation pipeline.
# Note: t5-base's pretrained translation tasks cover en->de/fr/ro only, so
# Malayalam output quality is limited; the commented-out mBART-50 variant
# below targets ml_IN directly.
pipe = pipeline("translation_en_to_ml", model="t5-base")


def predict(text):
    # The pipeline returns a list of dicts; take the translated string.
    return pipe(text)[0]["translation_text"]


iface = gr.Interface(
    fn=predict,
    inputs=[gr.Textbox(label="text", lines=3)],
    outputs="text",
    examples=[["Hello! My name is Rajesh"], ["How are you?"]],
)
iface.launch()

# Alternative implementation using mBART-50 (one-to-many), which supports
# Malayalam (ml_IN) as a target language:
#
# import gradio as gr
# from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
#
# model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
# tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
#
# def get_input(text):
#     model_inputs = tokenizer(text, return_tensors="pt")
#     # Force the decoder to start generation with the Malayalam language token.
#     generated_tokens = model.generate(**model_inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ml_IN"])
#     translation = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
#     return translation[0]
#
# iface = gr.Interface(
#     fn=get_input,
#     inputs="text",
#     outputs="text",
#     title="English to Malayalam Translator",
#     description="Get Malayalam translation for your text in English",
# )
# iface.launch()
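
# A minimal sketch of running this app locally (assumed dependencies; no
# pinned versions or filename are given in this file):
#
#   pip install gradio transformers torch sentencepiece
#   python app.py        # assuming the script is saved as app.py
#
# Once iface.launch() prints its local URL, the translator is available in
# the browser; predict("How are you?") can also be called directly for a
# quick smoke test without the UI.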