Update app.py
app.py CHANGED
@@ -43,7 +43,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     upscaler.enable_xformers_memory_efficient_attention()
     upscaler = upscaler.to(device)
     torch.cuda.empty_cache()
-    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -69,7 +69,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     refiner.enable_xformers_memory_efficient_attention()
     refiner = refiner.to(device)
     torch.cuda.empty_cache()
-    upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -81,7 +81,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     upscaler.enable_xformers_memory_efficient_attention()
     upscaler = upscaler.to(device)
     torch.cuda.empty_cache()
-    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -108,7 +108,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     refiner.enable_xformers_memory_efficient_attention()
     refiner = refiner.to(device)
     torch.cuda.empty_cache()
-    upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -120,7 +120,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     upscaler.enable_xformers_memory_efficient_attention()
     upscaler = upscaler.to(device)
     torch.cuda.empty_cache()
-    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -147,7 +147,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     refiner.enable_xformers_memory_efficient_attention()
     refiner = refiner.to(device)
     torch.cuda.empty_cache()
-    upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -160,7 +160,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     upscaler.enable_xformers_memory_efficient_attention()
     upscaler = upscaler.to(device)
     torch.cuda.empty_cache()
-    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -188,7 +188,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     refiner.enable_xformers_memory_efficient_attention()
     refiner = refiner.to(device)
     torch.cuda.empty_cache()
-    upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -201,7 +201,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     upscaler.enable_xformers_memory_efficient_attention()
     upscaler = upscaler.to(device)
     torch.cuda.empty_cache()
-    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -232,7 +232,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     animagine.enable_xformers_memory_efficient_attention()
     animagine = animagine.to(device)
     torch.cuda.empty_cache()
-    upscaled = animagine(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = animagine(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -245,7 +245,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     upscaler.enable_xformers_memory_efficient_attention()
     upscaler = upscaler.to(device)
     torch.cuda.empty_cache()
-    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -280,7 +280,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     sdxl.enable_xformers_memory_efficient_attention()
     sdxl = sdxl.to(device)
     torch.cuda.empty_cache()
-    upscaled = sdxl(prompt=Prompt, negative_prompt=negative_prompt, image=refined, num_inference_steps=
+    upscaled = sdxl(prompt=Prompt, negative_prompt=negative_prompt, image=refined, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -294,7 +294,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     upscaler.enable_xformers_memory_efficient_attention()
     upscaler = upscaler.to(device)
     torch.cuda.empty_cache()
-    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -326,7 +326,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     pipe.enable_xformers_memory_efficient_attention()
     pipe = pipe.to(device)
     torch.cuda.empty_cache()
-    upscaled = pipe(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = pipe(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -342,7 +342,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     pipe.enable_xformers_memory_efficient_attention()
     pipe = pipe.to(device)
     torch.cuda.empty_cache()
-    upscaled = pipe(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=
+    upscaled = pipe(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
     torch.cuda.empty_cache()
     return upscaled
 else:
@@ -355,8 +355,8 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
 gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Anime', 'Disney', 'StoryBook', 'SemiReal', 'Animagine XL 3.0', 'SDXL 1.0', 'FusionXL'], value='PhotoReal', label='Choose Model'),
     gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
-    gr.Slider(512,
-    gr.Slider(512,
+    gr.Slider(512, 1280, 768, step=128, label='Height'),
+    gr.Slider(512, 1280, 768, step=128, label='Width'),
     gr.Slider(1, maximum=15, value=5, step=.25, label='Guidance Scale'),
     gr.Slider(5, maximum=100, value=50, step=5, label='Number of Iterations'),
     gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True, label='Seed: 0 is Random'),
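Every hunk above except the last makes the same one-line change: the upscale/refine call now pins num_inference_steps=5 and guidance_scale=0, and unwraps the first generated image from the pipeline output via .images[0]. Since the commit repeats this block fifteen times across different pipelines (upscaler, refiner, animagine, sdxl, pipe), here is a minimal sketch of that repeated pattern as one helper, assuming the objects are standard diffusers pipelines; the name run_upscale and the device argument are hypothetical, not part of the commit:

import torch

def run_upscale(pipeline, prompt, negative_prompt, image, device="cuda"):
    """Sketch of the block each hunk above repeats: enable xformers
    memory-efficient attention, move the pipeline to the GPU, run it with
    the settings this commit introduces, and free cached VRAM around the call."""
    pipeline.enable_xformers_memory_efficient_attention()
    pipeline = pipeline.to(device)
    torch.cuda.empty_cache()
    upscaled = pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=image,
        num_inference_steps=5,  # fixed step count introduced by this commit
        guidance_scale=0,       # guidance disabled for the upscale/refine pass
    ).images[0]                 # unwrap the first PIL image from the output
    torch.cuda.empty_cache()
    return upscaled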
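The final hunk completes the two Slider calls that were previously left truncated at gr.Slider(512,. For reference, Gradio's Slider takes (minimum, maximum, value) positionally, so the new lines define a 512 to 1280 pixel range with a 768 default; a standalone sketch of just those two inputs, assuming they are passed into the gr.Interface shown at line 355:

import gradio as gr

# Height/Width inputs as completed by this commit: 512-1280 range,
# 768 default, stepped in multiples of 128, presumably so requested
# dimensions stay at sizes SD/SDXL-class models handle cleanly.
height = gr.Slider(512, 1280, 768, step=128, label='Height')
width = gr.Slider(512, 1280, 768, step=128, label='Width')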