import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from diffusers import DiffusionPipeline

# Output component and page styling for the Gradio app
image = gr.outputs.Image(type="pil", label="Your result")
css = (
    ".output-image{height: 528px !important} "
    ".output-carousel .output-image{height:272px !important} "
    "a{text-decoration: underline}"
)

# Hindi-to-English translation model
tokenizer = AutoTokenizer.from_pretrained("salesken/translation-hi-en")
model = AutoModelForSeq2SeqLM.from_pretrained("salesken/translation-hi-en")

# Latent diffusion text-to-image pipeline (loaded once at startup, not per request)
ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")


def translate(hin_snippet):
    # Translate the Hindi prompt to English
    inputs = tokenizer.encode(
        hin_snippet, return_tensors="pt", padding=True, max_length=512, truncation=True
    )
    outputs = model.generate(inputs, max_length=128, num_beams=4, early_stopping=True)
    translated = tokenizer.decode(outputs[0], skip_special_tokens=True).strip().lower()

    # Generate an image from the translated prompt,
    # e.g. "due to covid, we have reduced our debt interest"
    result = ldm([translated], num_inference_steps=50, eta=0.3, guidance_scale=6)
    generated = result.images[0]
    generated.save("out.png")
    return generated


iface = gr.Interface(fn=translate, inputs="text", outputs=image, css=css)
iface.launch()
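
# A minimal usage sketch (assumption: calling translate() directly, outside
# the Gradio UI; the Hindi phrase below is only an illustrative prompt):
#   img = translate("चाँद पर बैठी एक बिल्ली")  # "a cat sitting on the moon"
#   img.save("cat_on_moon.png")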