import gradio as gr
import easyocr
from transformers import AutoTokenizer, MBartForConditionalGeneration

# Load the OCR reader once so the English text-detection model stays in memory.
reader = easyocr.Reader(['en'])

# Download and cache the tokenizer and the English-to-many translation model once.
tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", use_fast=False)
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")


def translator(img):
    # Extract English text from the image and join the detected fragments into one string.
    result = reader.readtext(img, detail=0)
    news = " ".join(result)
    input_text = [news]

    # Convert the sentence to tensors.
    model_inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)

    # Translate from English to Hindi by forcing the Hindi language token.
    generated_tokens = model.generate(
        **model_inputs,
        forced_bos_token_id=tokenizer.lang_code_to_id["hi_IN"],
    )
    translation = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    # Return the first (and only) decoded sentence.
    return translation[0]


# Quick sanity check on a sample image (note the raw "resolve" URL rather than the "blob" page).
print(translator("https://huggingface.co/spaces/KAPtechies/Translation/resolve/main/WhatsApp%20Image%202023-09-23%20at%208.03.28%20AM.jpeg"))

demo = gr.Interface(fn=translator, inputs=gr.Image(), outputs="text")
demo.launch(inline=False)