ysharma (HF staff) committed
Commit 66c69b6
1 Parent(s): 5aa86ef
Files changed (1)
  1. app.py +10 -9
app.py CHANGED
@@ -6,20 +6,21 @@ def text2speech(text):
     return fastspeech(text)
 
 def engine(text_input):
-    #ner = gr.Interface.load("huggingface/flair/ner-english-ontonotes-large")
-    #entities = ner(text_input)
-    #entities = [tupl for tupl in entities if None not in tupl]
-    #entities_num = len(entities)
+    ner = gr.Interface.load("huggingface/flair/ner-english-ontonotes-large")
+    entities = ner(text_input)
+    entities = [tupl for tupl in entities if None not in tupl]
+    entities_num = len(entities)
 
     #img = run(text_input,'50','256','256','1',10) #entities[0][0]
+
     img = gr.Interface.load("spaces/multimodalart/latentdiffusion")(text_input,'50','256','256','1',10)[0] #inputs={'prompt':text_input,'steps':'50','width':'256','height':'256','images':'1','scale':10}).launch()
     #img_intfc = gr.Interface.load("spaces/multimodalart/latentdiffusion", inputs=[gr.inputs.Textbox(lines=1, label="Input Text"), gr.inputs.Textbox(lines=1, label="Input Text"), gr.inputs.Textbox(lines=1, label="Input Text"), gr.inputs.Textbox(lines=1, label="Input Text"), gr.inputs.Textbox(lines=1, label="Input Text"), gr.inputs.Textbox(lines=1, label="Input Text")],
     #outputs=[gr.outputs.Image(type="pil", label="output image"),gr.outputs.Carousel(label="Individual images",components=["image"]),gr.outputs.Textbox(label="Error")], )
     #title="Convert text to image")
     #img = img_intfc[0]
     #img = img_intfc(text_input,'50','256','256','1',10)
-    print(img)
-    print(type(img))
+    #print(img)
+    #print(type(img))
     #print(img)
     #print(type(img[1][0][0]))
     #print(img[1])
@@ -27,13 +28,13 @@ def engine(text_input):
     #inputs=['George',50,256,256,1,10]
     #run(prompt, steps, width, height, images, scale)
 
-    #speech = text2speech(text_input)
-    return img #entities, speech, img
+    speech = text2speech(text_input)
+    return img, entities, speech
 
 #image = gr.outputs.Image(type="pil", label="output image")
 app = gr.Interface(engine,
     gr.inputs.Textbox(lines=5, label="Input Text"),
-    gr.outputs.Image(type="auto", label="Output"),
+    [gr.outputs.Image(type="auto", label="Output"), gr.outputs.Textbox(type="auto", label="Text"), gr.outputs.Audio(type="file", label="Speech Answer") ],
     #live=True,
     #outputs=[#gr.outputs.Textbox(type="auto", label="Text"),gr.outputs.Audio(type="file", label="Speech Answer"),
     #outputs= img, #gr.outputs.Carousel(label="Individual images",components=["image"]), #, gr.outputs.Textbox(label="Error")],
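
Taken together, the post-commit app.py looks roughly like the sketch below. The first few lines of the file and anything past the second hunk are not part of this diff, so the gradio import, the fastspeech loader (the facebook/fastspeech2-en-ljspeech path is a hypothetical stand-in), and the closing launch() call are assumptions; everything inside engine and the gr.Interface call follows the added lines above, using the legacy Gradio 2.x gr.inputs/gr.outputs API as in the original file.

    # Minimal sketch of app.py after this commit (Gradio 2.x API, as in the original file).
    # ASSUMPTIONS: the import, the fastspeech loader, and app.launch() fall outside the
    # shown hunks; the TTS model path below is a hypothetical stand-in.
    import gradio as gr

    # Hypothetical TTS loader -- the real definition sits in lines this diff omits.
    fastspeech = gr.Interface.load("huggingface/facebook/fastspeech2-en-ljspeech")

    def text2speech(text):
        return fastspeech(text)

    def engine(text_input):
        # Named-entity recognition on the prompt (re-enabled by this commit).
        ner = gr.Interface.load("huggingface/flair/ner-english-ontonotes-large")
        entities = ner(text_input)
        entities = [tupl for tupl in entities if None not in tupl]
        entities_num = len(entities)  # kept from the source, currently unused

        # Text-to-image via the multimodalart/latentdiffusion Space;
        # the first returned item is the generated image.
        img = gr.Interface.load("spaces/multimodalart/latentdiffusion")(
            text_input, '50', '256', '256', '1', 10
        )[0]

        # Text-to-speech for the prompt (re-enabled by this commit).
        speech = text2speech(text_input)
        return img, entities, speech

    # Three output components, matching the three values engine() now returns.
    app = gr.Interface(
        engine,
        gr.inputs.Textbox(lines=5, label="Input Text"),
        [
            gr.outputs.Image(type="auto", label="Output"),
            gr.outputs.Textbox(type="auto", label="Text"),
            gr.outputs.Audio(type="file", label="Speech Answer"),
        ],
    )
    app.launch()

Net effect of the commit: the Space no longer returns only the generated image; the single Image output was widened to an image/text/audio list so it lines up one-to-one with the new return img, entities, speech.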