multimodalart HF staff committed on
Commit
b2ef087
1 Parent(s): fffc321

Remove queue

Browse files

Remove queue to test if underlying queue works

Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -9,7 +9,6 @@ import shortuuid
9
  latent = gr.Interface.load("spaces/multimodalart/latentdiffusion")
10
  rudalle = gr.Interface.load("spaces/multimodalart/rudalle")
11
  diffusion = gr.Interface.load("spaces/multimodalart/diffusion")
12
- print(diffusion)
13
  vqgan = gr.Interface.load("spaces/multimodalart/vqgan")
14
 
15
  def text2image_latent(text,steps,width,height,images,diversity):
@@ -57,6 +56,9 @@ def text2image_diffusion(text,steps_diff, images_diff, weight, clip):
57
  image_paths.append(image_path)
58
  return(image_paths)
59
 
 
 
 
60
  css_mt = {"margin-top": "1em"}
61
 
62
  empty = gr.outputs.HTML()
@@ -108,4 +110,4 @@ with gr.Blocks() as mindseye:
108
  get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=gallery)
109
  get_image_vqgan.click(text2image_vqgan, inputs=[text,width_vq,height_vq,style,steps_vq,flavor],outputs=gallery)
110
  get_image_diffusion.click(text2image_diffusion, inputs=[text, steps_diff, images_diff, weight, clip],outputs=gallery)
111
- mindseye.launch()
 
9
  latent = gr.Interface.load("spaces/multimodalart/latentdiffusion")
10
  rudalle = gr.Interface.load("spaces/multimodalart/rudalle")
11
  diffusion = gr.Interface.load("spaces/multimodalart/diffusion")
 
12
  vqgan = gr.Interface.load("spaces/multimodalart/vqgan")
13
 
14
  def text2image_latent(text,steps,width,height,images,diversity):
 
56
  image_paths.append(image_path)
57
  return(image_paths)
58
 
59
+ def text2image_dallemini(text):
60
+ pass
61
+
62
  css_mt = {"margin-top": "1em"}
63
 
64
  empty = gr.outputs.HTML()
 
110
  get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=gallery)
111
  get_image_vqgan.click(text2image_vqgan, inputs=[text,width_vq,height_vq,style,steps_vq,flavor],outputs=gallery)
112
  get_image_diffusion.click(text2image_diffusion, inputs=[text, steps_diff, images_diff, weight, clip],outputs=gallery)
113
+ mindseye.launch(enable_queue=False)