silveroxides committed
Commit cad50ee
Parent(s): ca0e0c7
Update app.py

app.py CHANGED
@@ -75,18 +75,21 @@ vae_hybridpony = AutoencoderKL.from_pretrained(os.path.join(ckpt_dir_hybridpony,
 pipe_realpony = StableDiffusionXLPipeline.from_pretrained(
     ckpt_dir_realpony,
     vae=vae_realpony,
+    custom_pipeline="multimodalart/sdxl_perturbed_attention_guidance",
     torch_dtype=torch.float16,
     use_safetensors=True,
 )
 pipe_ultpony = StableDiffusionXLPipeline.from_pretrained(
     ckpt_dir_ultpony,
     vae=vae_ultpony,
+    custom_pipeline="multimodalart/sdxl_perturbed_attention_guidance",
     torch_dtype=torch.float16,
     use_safetensors=True,
 )
 pipe_hybridpony = StableDiffusionXLPipeline.from_pretrained(
     ckpt_dir_hybridpony,
     vae=vae_hybridpony,
+    custom_pipeline="multimodalart/sdxl_perturbed_attention_guidance",
     torch_dtype=torch.float16,
     use_safetensors=True,
 )
@@ -204,7 +207,7 @@ def upscale_image(image, scale):
 
 @spaces.GPU(duration=120)
 def generate_image(model_choice, additional_positive_prompt, additional_negative_prompt, height, width, num_inference_steps,
-                    guidance_scale, num_images_per_prompt, use_random_seed, seed, sampler, clip_skip,
+                    guidance_scale, pag_scale, pag_layers, num_images_per_prompt, use_random_seed, seed, sampler, clip_skip,
                     use_florence2, use_medium_enhancer, use_long_enhancer,
                     use_positive_prefix, use_positive_suffix, use_negative_prefix, use_negative_suffix,
                     use_upscaler, upscale_factor,
@@ -271,6 +274,8 @@ def generate_image(model_choice, additional_positive_prompt, additional_negative
         width=width,
         num_inference_steps=num_inference_steps,
         guidance_scale=guidance_scale,
+        pag_scale=pag_scale,
+        pag_layers=pag_layers,
         num_images_per_prompt=num_images_per_prompt,
         generator=torch.Generator(pipe.device).manual_seed(seed)
     ).images
@@ -323,6 +328,8 @@ with gr.Blocks(theme='bethecloud/storj_theme') as demo:
         width = gr.Slider(512, 2048, 1024, step=64, label="Width")
         num_inference_steps = gr.Slider(20, 100, 30, step=1, label="Number of Inference Steps")
         guidance_scale = gr.Slider(1, 20, 6, step=0.1, label="Guidance Scale")
+        pag_scale = gr.Number(label="Pag Scale", value=3.0)
+        pag_layers = gr.Dropdown(label="Model layers to apply Pag to", info="mid is the one used on the paper, up and down blocks seem unstable", choices=["up", "mid", "down"], multiselect=True, value="mid")
         num_images_per_prompt = gr.Slider(1, 4, 1, step=1, label="Number of images per prompt")
         use_random_seed = gr.Checkbox(label="Use Random Seed", value=True)
         seed = gr.Number(label="Seed", value=0, precision=0)
@@ -374,7 +381,7 @@ with gr.Blocks(theme='bethecloud/storj_theme') as demo:
         inputs=[
             model_choice, # Add this new input
             positive_prompt, negative_prompt, height, width, num_inference_steps,
-            guidance_scale, num_images_per_prompt, use_random_seed, seed, sampler,
+            guidance_scale, pag_scale, pag_layers, num_images_per_prompt, use_random_seed, seed, sampler,
             clip_skip, use_florence2, use_medium_enhancer, use_long_enhancer,
             use_positive_prefix, use_positive_suffix, use_negative_prefix, use_negative_suffix,
             use_upscaler, upscale_factor,
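In short, the commit loads each of the three SDXL checkpoints through the multimodalart/sdxl_perturbed_attention_guidance community pipeline and threads two new controls, pag_scale and pag_layers, from the Gradio UI into the generation call. Below is a minimal sketch (not part of the commit) of the resulting load-and-call pattern; the checkpoint path and prompt are placeholders, and it assumes the community pipeline accepts the pag_scale / pag_layers keyword arguments exactly as the diff passes them.

import torch
from diffusers import StableDiffusionXLPipeline

ckpt_dir = "path/to/sdxl-checkpoint"  # placeholder for one of the three checkpoint directories

# Load the checkpoint through the PAG community pipeline, as in the diff above.
pipe = StableDiffusionXLPipeline.from_pretrained(
    ckpt_dir,
    custom_pipeline="multimodalart/sdxl_perturbed_attention_guidance",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")

# Call with the UI defaults introduced by the commit (pag_scale=3.0, pag_layers=["mid"]).
images = pipe(
    prompt="placeholder prompt",
    num_inference_steps=30,
    guidance_scale=6.0,
    pag_scale=3.0,          # strength of perturbed-attention guidance
    pag_layers=["mid"],     # UNet blocks to perturb; "mid" is the dropdown default
    num_images_per_prompt=1,
    generator=torch.Generator("cuda").manual_seed(0),
).images

Per the dropdown's info text, "mid" is the layer used in the PAG paper, while applying the perturbation to the up and down blocks is reportedly unstable, which is why "mid" is the default selection.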