apolinario committed
Commit 84f6f2e
1 Parent(s): 071944b
Files changed (4)
  1. README.md +1 -1
  2. app.py +54 -36
  3. gradio_queue.db +0 -0
  4. requirements.txt +1 -0
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 💻
  colorFrom: pink
  colorTo: yellow
  sdk: gradio
- sdk_version: 2.9b12
+ sdk_version: 2.9b21
  app_file: app.py
  pinned: false
  license: mit
app.py CHANGED
@@ -1,60 +1,78 @@
  from turtle import width
  import gradio as gr
  import random
- import inspect
+ import os
+ import io, base64
+ from PIL import Image
+ import numpy
+ import shortuuid
+
  latent = gr.Interface.load("spaces/multimodalart/latentdiffusion")
- print(latent)
  rudalle = gr.Interface.load("spaces/multimodalart/rudalle")
- print(rudalle)
+
+ #print(rudalle)
  #guided = gr.Interface.load("spaces/EleutherAI/clip-guided-diffusion")
  #print(guided)
  def text2image_latent(text,steps,width,height,images,diversity):
-     image = latent(text, steps, width, height, images, diversity)[0]
-     return(image)
+     results = latent(text, steps, width, height, images, diversity)
+     image_paths = []
+     image_arrays = []
+     for image in results[1]:
+         image_str = image[0]
+         image_str = image_str.replace("data:image/png;base64,","")
+         decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
+         img = Image.open(io.BytesIO(decoded_bytes))
+         image_arrays.append(numpy.asarray(img))
+         #url = shortuuid.uuid()
+         #temp_dir = './tmp'
+         #if not os.path.exists(temp_dir):
+         #    os.makedirs(temp_dir, exist_ok=True)
+         #image_path = f'{temp_dir}/{url}.png'
+         #img.save(f'{temp_dir}/{url}.png')
+         #image_paths.append(image_path)
+     return(results[0],image_arrays)

  def text2image_rudalle(text,aspect,model):
-     print(text)
-     print(aspect)
-     print(model)
      image = rudalle(text,aspect,model)[0]
-     print(image)
      return(image)

-
  #def text2image_guided(text):
  #    image = guided(text, None, 10, 600, 0, 0, 0, random.randint(0,2147483647), None, 50, 32)
  #    print(image)
  #    image = image[0]
  #    return(image)

- block = gr.Blocks()
+ css_mt = {"margin-top": "1em"}
+
+ empty = gr.outputs.HTML()

  with gr.Blocks() as mindseye:
-     text = gr.inputs.Textbox(placeholder="Try writing something..")
-     with gr.Tabs():
-         with gr.TabItem("Latent"):
-             which_tab = "Latent Diffusion"
-             steps = gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate",default=45,maximum=50,minimum=1,step=1)
-             width = gr.inputs.Radio(label="Width", choices=[32,64,128,256],default=256)
-             height = gr.inputs.Radio(label="Height", choices=[32,64,128,256],default=256)
-             images = gr.inputs.Slider(label="Images - How many images you wish to generate", default=2, step=1, minimum=1, maximum=4)
-             diversity = gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be",default=5.0, minimum=1.0, maximum=15.0)
-             get_image_latent = gr.Button("Generate Image")
-         with gr.TabItem("Rudalle"):
-             which_tab = "ruDALLE"
-             aspect = gr.inputs.Radio(label="Aspect Ratio", choices=["Square", "Horizontal", "Vertical"],default="Square")
-             model = gr.inputs.Dropdown(label="Model", choices=["Surrealism","Realism", "Emoji"], default="Surrealism")
-             get_image_rudalle = gr.Button("Generate Image")
-         #with gr.TabItem("guided"):
-         #    which_tab = "Guided Diffusion"
-         #    get_image_guided = gr.Button("Generate Image")
-     #get_image = gr.Button("Generate Image")
-     with gr.Column():
+     gr.Markdown("# MindsEye Lite")
+     gr.Markdown("### Run multiple text-to-image models in one place")
+     gr.Markdown("<style>.mx-auto.container .gr-form-gap {flex-direction: row; gap: calc(1rem * calc(1 - var(--tw-space-y-reverse)));} .mx-auto.container .gr-form-gap .flex-col, .mx-auto.container .gr-form-gap .gr-box{width: 100%}</style>")
+     text = gr.inputs.Textbox(placeholder="Try writing something..", label="Prompt")
+     with gr.Column():
+         with gr.Row():
+             with gr.Tabs():
+                 with gr.TabItem("Latent Diffusion"):
+                     steps = gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate",default=45,maximum=50,minimum=1,step=1)
+                     width = gr.inputs.Slider(label="Width", default=256, step=32, maximum=256, minimum=32)
+                     height = gr.inputs.Slider(label="Height", default=256, step=32, maximum=256, minimum=32)
+                     images = gr.inputs.Slider(label="Images - How many images you wish to generate", default=2, step=1, minimum=1, maximum=4)
+                     diversity = gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be",default=5.0, minimum=1.0, maximum=15.0)
+                     get_image_latent = gr.Button("Generate Image",css=css_mt)
+
+                 with gr.TabItem("ruDALLE"):
+                     aspect = gr.inputs.Radio(label="Aspect Ratio", choices=["Square", "Horizontal", "Vertical"],default="Square")
+                     model = gr.inputs.Dropdown(label="Model", choices=["Surrealism","Realism", "Emoji"], default="Surrealism")
+                     get_image_rudalle = gr.Button("Generate Image",css=css_mt)
          with gr.Row():
-             image = gr.outputs.Image()
+             with gr.Tabs():
+                 with gr.TabItem("Image output"):
+                     image = gr.outputs.Image()
+                 with gr.TabItem("Gallery output"):
+                     gallery = gr.outputs.Carousel(label="Individual images",components=["image"])

-     print(which_tab)
-     get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=image)
+     get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=[image,gallery])
      get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=image)
-     #get_image_guided.click(text2image_guided, inputs=text, outputs=image)
- mindseye.launch()
+ mindseye.launch(enable_queue=True)
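
The heart of the new text2image_latent is decoding the gallery that the latentdiffusion Space returns as base64-encoded PNG data URIs into numpy arrays Gradio can display. Below is a standalone sketch of that step, under the assumption (taken from the diff above) that each gallery entry wraps a "data:image/png;base64,..." string; decode_data_uri and the round-trip test image are illustrative, not part of the commit.

# Standalone sketch of the decoding performed inside the new text2image_latent.
import io
import base64

import numpy
from PIL import Image

def decode_data_uri(image_str):
    # Drop the "data:image/png;base64," prefix, keeping the raw base64 payload
    image_str = image_str.replace("data:image/png;base64,", "")
    decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
    # Re-open the PNG bytes with PIL and hand back a numpy array
    return numpy.asarray(Image.open(io.BytesIO(decoded_bytes)))

# Round-trip a 2x2 red PNG through the same encoding the Space is assumed to use
buf = io.BytesIO()
Image.new("RGB", (2, 2), (255, 0, 0)).save(buf, format="PNG")
data_uri = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode("utf-8")
print(decode_data_uri(data_uri).shape)  # (2, 2, 3)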
 
gradio_queue.db ADDED
Binary file (16.4 kB)
requirements.txt ADDED
@@ -0,0 +1 @@
+ shortuuid
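
requirements.txt gains shortuuid, although its only appearance in app.py so far is the commented-out temp-file block inside text2image_latent. The sketch below shows what that block would do if enabled; save_temp_image is an illustrative name, not code from the commit.

# Illustrative helper mirroring the commented-out block in text2image_latent:
# write each decoded image to ./tmp under a random short id.
import os
import shortuuid
from PIL import Image

def save_temp_image(img: Image.Image, temp_dir: str = "./tmp") -> str:
    os.makedirs(temp_dir, exist_ok=True)  # create ./tmp on first use
    image_path = f"{temp_dir}/{shortuuid.uuid()}.png"  # random 22-char filename
    img.save(image_path)
    return image_path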