Update app.py
WTF... It was defined....
app.py
CHANGED
@@ -15,10 +15,10 @@ upscaler = upscaler.to(device)
 upscaler.enable_xformers_memory_efficient_attention()
 
 
-def genie (Prompt, negative_prompt, height, width,
+def genie (Prompt, negative_prompt, height, width, scale, steps, Seed, upscale):
     generator = torch.Generator(device=device).manual_seed(Seed)
     if upscale == "Yes":
-        low_res_latents = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=
+        low_res_latents = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator, output_type="latent").images
         image = upscaler(Prompt, negative_prompt=negative_prompt, image=low_res_latents, num_inference_steps=5, guidance_scale=0, generator=generator).images[0]
     else:
         image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
@@ -28,7 +28,7 @@ gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generat
     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
     gr.Slider(512, 1024, 768, step=128, label='Height'),
     gr.Slider(512, 1024, 768, step=128, label='Width'),
-    gr.Slider(1, maximum=15, value=7, step=.25),
+    gr.Slider(1, maximum=15, value=7, step=.25, label='Guidance Scale'),
     gr.Slider(25, maximum=100, value=50, step=25, label='Number of Iterations'),
     gr.Slider(minimum=1, step=1, maximum=9999999999999999, randomize=True),
     gr.Radio(["Yes", "No"], label='Upscale?', value='No'),
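For context, a minimal sketch of what the whole app.py looks like after this commit, assuming the usual diffusers + gradio setup for a latent-upscaler Space running on a GPU with xformers installed. The model IDs, the first Textbox label, the return statement, and the outputs/launch() wiring are assumptions for illustration; only the genie() body and the input components mirror the diff above.

# Hypothetical reconstruction of the surrounding app.py.
# Assumptions: diffusers + gradio on a CUDA machine with xformers installed;
# the model checkpoints below are guesses, not the Space's confirmed choices.
import torch
import gradio as gr
from diffusers import DiffusionPipeline, StableDiffusionLatentUpscalePipeline

device = "cuda"  # assumes a GPU; fp16 weights and xformers will not work on CPU

# Base text-to-image pipeline (assumed checkpoint).
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16)
pipe = pipe.to(device)

# Latent upscaler; it consumes the base pipeline's latents rather than decoded images.
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
upscaler = upscaler.to(device)
upscaler.enable_xformers_memory_efficient_attention()

def genie (Prompt, negative_prompt, height, width, scale, steps, Seed, upscale):
    # Fixed seed so a given Seed value reproduces the same image.
    generator = torch.Generator(device=device).manual_seed(Seed)
    if upscale == "Yes":
        # output_type="latent" keeps the result in latent space for the upscaler.
        low_res_latents = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width,
                               num_inference_steps=steps, guidance_scale=scale,
                               generator=generator, output_type="latent").images
        image = upscaler(Prompt, negative_prompt=negative_prompt, image=low_res_latents,
                         num_inference_steps=5, guidance_scale=0, generator=generator).images[0]
    else:
        image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width,
                     num_inference_steps=steps, guidance_scale=scale, generator=generator).images[0]
    return image  # assumed; the diff does not show the function's return

gr.Interface(fn=genie, inputs=[
    gr.Textbox(label='What you want the AI to generate. 77 Token Limit'),  # label reconstructed from the truncated hunk header
    gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
    gr.Slider(512, 1024, 768, step=128, label='Height'),
    gr.Slider(512, 1024, 768, step=128, label='Width'),
    gr.Slider(1, maximum=15, value=7, step=.25, label='Guidance Scale'),
    gr.Slider(25, maximum=100, value=50, step=25, label='Number of Iterations'),
    gr.Slider(minimum=1, step=1, maximum=9999999999999999, randomize=True),
    gr.Radio(["Yes", "No"], label='Upscale?', value='No'),
], outputs=gr.Image(label='Generated Image')).launch()  # assumed output wiring

The output_type="latent" argument added in this commit is what lets the x2 latent upscaler take the base pipeline's latents directly instead of a decoded image, and the widened genie() signature means scale, steps, Seed, and upscale now actually arrive from the Gradio inputs, which is presumably the undefined-name problem the commit message is reacting to.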