multimodalart (HF staff) committed
Commit fd00b5e · 1 parent: c0ceaaf

Revert back

Files changed (1)
  1. app.py +28 -26
app.py CHANGED
@@ -44,35 +44,37 @@ def text2image_rudalle(text,aspect,model):
 css_mt = {"margin-top": "1em"}
 
 empty = gr.outputs.HTML()
-mindseye = gr.Blocks()
 
-with mindseye:
+with gr.Blocks() as mindseye:
 gr.Markdown("<h1>MindsEye Lite <small><small>run multiple text-to-image models in one place</small></small></h1><p>MindsEye Lite orchestrates multiple text-to-image Hugging Face Spaces in one convenient space, so you can try different models. This work carries the spirit of <a href='https://multimodal.art/mindseye' target='_blank'>MindsEye Beta</a>, a tool to run multiple models with a single UI, but adjusted to the current hardware limitations of Spaces. MindsEye Lite was created by <a style='color: rgb(99, 102, 241);font-weight:bold' href='https://twitter.com/multimodalart' target='_blank'>@multimodalart</a>, keep up with the <a style='color: rgb(99, 102, 241);' href='https://multimodal.art/news' target='_blank'>latest multimodal ai art news here</a> and consider <a style='color: rgb(99, 102, 241);' href='https://www.patreon.com/multimodalart' target='_blank'>supporting us on Patreon</a></div></p>")
-#gr.Markdown("<style>.mx-auto.container .gr-form-gap {flex-direction: row; gap: calc(1rem * calc(1 - var(--tw-space-y-reverse)));} .mx-auto.container .gr-form-gap .flex-col, .mx-auto.container .gr-form-gap .gr-box{width: 100%}</style>")
-text = gr.inputs.Textbox(placeholder="Try writing something..", label="Prompt", default="A mecha robot in a favela")
-
+gr.Markdown("<style>.mx-auto.container .gr-form-gap {flex-direction: row; gap: calc(1rem * calc(1 - var(--tw-space-y-reverse)));} .mx-auto.container .gr-form-gap .flex-col, .mx-auto.container .gr-form-gap .gr-box{width: 100%}</style>")
+text = gr.inputs.Textbox(placeholder="Try writing something..", label="Prompt")
 with gr.Column():
-with gr.Row():
-with gr.Tabs():
+with gr.Row():
+with gr.Tabs():
 with gr.TabItem("Latent Diffusion"):
 steps = gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate",default=45,maximum=50,minimum=1,step=1)
-#width = gr.inputs.Slider(label="Width", default=256, step=32, maximum=256, minimum=32)
-#height = gr.inputs.Slider(label="Height", default=256, step=32, maximum = 256, minimum=32)
-#images = gr.inputs.Slider(label="Images - How many images you wish to generate", default=2, step=1, minimum=1, maximum=4)
-#diversity = gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be",default=5.0, minimum=1.0, maximum=15.0)
-#get_image_latent = gr.Button("Generate Image",css=css_mt)
-#
-# with gr.TabItem("ruDALLE"):
-# aspect = gr.inputs.Radio(label="Aspect Ratio", choices=["Square", "Horizontal", "Vertical"],default="Square")
-# model = gr.inputs.Dropdown(label="Model", choices=["Surrealism","Realism", "Emoji"], default="Surrealism")
-# get_image_rudalle = gr.Button("Generate Image",css=css_mt)
-#with gr.Row():
-#with gr.Tabs():
-# with gr.TabItem("Image output"):
-# image = gr.outputs.Image()
-# with gr.TabItem("Gallery output"):
-# gallery = gr.Gallery(label="Individual images")
+width = gr.inputs.Slider(label="Width", default=256, step=32, maximum=256, minimum=32)
+height = gr.inputs.Slider(label="Height", default=256, step=32, maximum = 256, minimum=32)
+images = gr.inputs.Slider(label="Images - How many images you wish to generate", default=2, step=1, minimum=1, maximum=4)
+diversity = gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be",default=5.0, minimum=1.0, maximum=15.0)
+get_image_latent = gr.Button("Generate Image",css=css_mt)
+
+with gr.TabItem("ruDALLE"):
+aspect = gr.inputs.Radio(label="Aspect Ratio", choices=["Square", "Horizontal", "Vertical"],default="Square")
+model = gr.inputs.Dropdown(label="Model", choices=["Surrealism","Realism", "Emoji"], default="Surrealism")
+get_image_rudalle = gr.Button("Generate Image",css=css_mt)
+with gr.TabItem("VQGAN+CLIP"):
+pass
+with gr.TabItem("V-Diffusion"):
+pass
+with gr.Row():
+with gr.Tabs():
+with gr.TabItem("Image output"):
+image = gr.outputs.Image()
+with gr.TabItem("Gallery output"):
+gallery = gr.Gallery(label="Individual images")
 
-#get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=[image,gallery])
-#get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=image)
-mindseye.launch(share=False)
+get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=[image,gallery])
+get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=image)
+mindseye.launch()
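
Because the diff view drops leading whitespace, the reverted layout (the "+" side of the hunk) is easier to read reassembled as indented Python. The sketch below is a reconstruction, not the literal file: the indentation is inferred from the context managers, the import line and the placement of the click handlers inside the Blocks context are assumptions, and the long Markdown strings are abbreviated. text2image_latent and text2image_rudalle are the handler functions defined earlier in app.py (the hunk header shows the latter).

# Reconstruction of the reverted UI section (new lines 44-80).
# Everything except indentation, the import, and the abbreviated strings
# is taken verbatim from the "+" lines in the diff above.
import gradio as gr

css_mt = {"margin-top": "1em"}

empty = gr.outputs.HTML()

with gr.Blocks() as mindseye:
    gr.Markdown("<h1>MindsEye Lite ...")  # full header/credits HTML string as in the diff
    gr.Markdown("<style>...")             # full CSS tweak string as in the diff
    text = gr.inputs.Textbox(placeholder="Try writing something..", label="Prompt")
    with gr.Column():
        with gr.Row():
            with gr.Tabs():
                with gr.TabItem("Latent Diffusion"):
                    steps = gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate", default=45, maximum=50, minimum=1, step=1)
                    width = gr.inputs.Slider(label="Width", default=256, step=32, maximum=256, minimum=32)
                    height = gr.inputs.Slider(label="Height", default=256, step=32, maximum=256, minimum=32)
                    images = gr.inputs.Slider(label="Images - How many images you wish to generate", default=2, step=1, minimum=1, maximum=4)
                    diversity = gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be", default=5.0, minimum=1.0, maximum=15.0)
                    get_image_latent = gr.Button("Generate Image", css=css_mt)

                with gr.TabItem("ruDALLE"):
                    aspect = gr.inputs.Radio(label="Aspect Ratio", choices=["Square", "Horizontal", "Vertical"], default="Square")
                    model = gr.inputs.Dropdown(label="Model", choices=["Surrealism", "Realism", "Emoji"], default="Surrealism")
                    get_image_rudalle = gr.Button("Generate Image", css=css_mt)
                with gr.TabItem("VQGAN+CLIP"):
                    pass  # placeholder tab, no controls yet
                with gr.TabItem("V-Diffusion"):
                    pass  # placeholder tab, no controls yet
        with gr.Row():
            with gr.Tabs():
                with gr.TabItem("Image output"):
                    image = gr.outputs.Image()
                with gr.TabItem("Gallery output"):
                    gallery = gr.Gallery(label="Individual images")

    # Wire the buttons to the handlers defined earlier in app.py;
    # event listeners must be registered inside the Blocks context.
    get_image_latent.click(text2image_latent, inputs=[text, steps, width, height, images, diversity], outputs=[image, gallery])
    get_image_rudalle.click(text2image_rudalle, inputs=[text, aspect, model], outputs=image)

mindseye.launch()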