import gradio as gr
from transformers import pipeline

# Translation pipeline backed by the t5-base checkpoint.
pipe = pipeline("translation", model="t5-base")


def predict(text):
    # Return only the translated string from the first (and only) result.
    return pipe(text)[0]["translation_text"]


iface = gr.Interface(
    fn=predict,
    inputs=[gr.Textbox(label="text", lines=3)],
    outputs="text",
    examples=[["Hello! My name is Rajesh"], ["How are you?"]],
)

iface.launch()
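# Note: with the generic "translation" task, the pipeline infers a language
# pair from t5-base's task_specific_params (typically English -> German, with
# a warning). A minimal sketch for pinning the pair explicitly, assuming the
# standard "translation_xx_to_yy" task names, e.g. English -> French:
#
# pipe = pipeline("translation_en_to_fr", model="t5-base")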
# Alternative (commented out): English -> Malayalam translation with mBART-50.
# import gradio as gr
# from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
#
# model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
# tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
#
# def get_input(text):
#     models_input = tokenizer(text, return_tensors="pt")
#     generated_tokens = model.generate(**models_input, forced_bos_token_id=tokenizer.lang_code_to_id["ml_IN"])
#     translation = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
#     return translation
#
# iface = gr.Interface(
#     fn=get_input,
#     inputs="text",
#     outputs="text",
#     title="English to Malayalam Translator",
#     description="Get Malayalam translation for your text in English",
# )
# iface.launch()