muhtasham committed
Commit c1113dc
1 Parent(s): b3b212c

Update app.py

Files changed (1)
  1. app.py +8 -8
app.py CHANGED
@@ -44,10 +44,10 @@ def zero_shot(image, text_input):
 
 with gr.Blocks() as demo:
     gr.Markdown( """
-    Input voice/text
-    Convert voice/text to image via Latent Diffusion
-    Given list of labels and a selected image from gallery do zero-shot classification
-    Coming soon: TTS(audio) your output label as: Your output looks like "label of zero-shot"
+    - [x] Input voice/text
+    - [x] Convert voice/text to image via Latent Diffusion
+    - [x] Given list of labels and a selected image from gallery do zero-shot classification
+    - [ ] Coming soon: TTS(audio) your output label as: Your output looks like "label of zero-shot"
     """)
     with gr.Row():
         with gr.Column():
@@ -75,7 +75,7 @@ with gr.Blocks() as demo:
 
 
     speech_to_text.click(speech_to_text, inputs=audio_file, outputs=text)
-    get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=gallery)
-    zero_shot_clf.click(zero_shot, inputs=[gallery,text_input], outputs=label)
-    
-    demo.launch(enable_queue=False)
+    get_image_latent.click(text2image_latent, inputs=[text, steps, width, height, images, diversity], outputs=gallery)
+    zero_shot_clf.click(zero_shot, inputs=[gallery, text_input], outputs=label)
+    
+    demo.launch()
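For context, here is a minimal sketch of the Gradio Blocks pattern this commit touches. It is not the Space's actual code: the `echo` callback, the component names, and the "Run" button are placeholders standing in for `text2image_latent`, `zero_shot`, and the app's real components. It illustrates how a Button's `.click()` binds a Python function to input and output components, with `demo.launch()` called without `enable_queue=False`, as in the new version of app.py.

```python
# Minimal sketch, not the Space's code: echo and the component names here are
# placeholders illustrating the .click() wiring pattern used in app.py.
import gradio as gr

def echo(text):
    # Stand-in for callbacks like text2image_latent or zero_shot.
    return f"You typed: {text}"

with gr.Blocks() as demo:
    gr.Markdown("""
    - [x] Type something
    - [x] Press Run
    """)
    with gr.Row():
        with gr.Column():
            text = gr.Textbox(label="Input")
            run = gr.Button("Run")
        with gr.Column():
            out = gr.Textbox(label="Output")
    # A Button's .click() binds a Python function to input/output components.
    run.click(echo, inputs=text, outputs=out)

    # As in the updated app.py, launch() is called with its defaults.
    demo.launch()
```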