import gradio as gr

# Text-to-speech model (FastSpeech 2 trained on LJSpeech), loaded from the Hugging Face Hub.
fastspeech = gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech")


def text2speech(text):
    return fastspeech(text)


def engine(text_input):
    # Named-entity recognition with Flair's large English OntoNotes model.
    ner = gr.Interface.load("huggingface/flair/ner-english-ontonotes-large")
    entities = ner(text_input)
    # Drop any entity tuples that contain missing values.
    entities = [tupl for tupl in entities if None not in tupl]
    entities_num = len(entities)  # currently unused

    # Generate an image from the text with the multimodalart/latentdiffusion Space.
    # Arguments: prompt, steps, width, height, number of images, guidance scale; keep the first output.
    img = gr.Interface.load("spaces/multimodalart/latentdiffusion")(
        text_input, '50', '256', '256', '1', 10
    )[0]

    # Read the input text aloud.
    speech = text2speech(text_input)
    return img, entities, speech


app = gr.Interface(
    engine,
    gr.inputs.Textbox(lines=5, label="Input Text"),
    [
        gr.outputs.Image(type="auto", label="Output"),
        gr.outputs.Textbox(type="auto", label="Text"),
        gr.outputs.Audio(type="file", label="Speech Answer"),
    ],
    examples=['Apple'],
    # Longer example prompt:
    # "On April 17th Sunday George celebrated Easter. He is staying at Empire State building with his parents.
    #  He is a citizen of Canada and speaks English and French fluently. His role model is former president Obama.
    #  He got 1000 dollar from his mother to visit Disney World and to buy new iPhone mobile.
    #  George likes watching Game of Thrones."
    description="Takes a text as input and reads it out to you.",
).launch(debug=True)