furkan gözükara ev pc committed
Commit: be3363a
Parent(s): a464441

moved to main pipe

Files changed:
- app.py (+8, -29)
- requirements.txt (+1, -1)
app.py
CHANGED
@@ -7,7 +7,6 @@ import torch
 from typing import List
 from diffusers.utils import numpy_to_pil
 from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline
-from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
 from previewer.modules import Previewer
 import os
 import datetime
@@ -61,19 +60,6 @@ if torch.cuda.is_available():
     prior_pipeline.prior = torch.compile(prior_pipeline.prior, mode="reduce-overhead", fullgraph=True)
     decoder_pipeline.decoder = torch.compile(decoder_pipeline.decoder, mode="max-autotune", fullgraph=True)
 
-    if PREVIEW_IMAGES:
-        previewer = Previewer()
-        previewer.load_state_dict(torch.load("previewer/previewer_v1_100k.pt")["state_dict"])
-        previewer.eval().requires_grad_(False).to(device).to(dtype)
-        def callback_prior(i, t, latents):
-            output = previewer(latents)
-            output = numpy_to_pil(output.clamp(0, 1).permute(0, 2, 3, 1).float().cpu().numpy())
-            return output
-        callback_steps = 1
-    else:
-        previewer = None
-        callback_prior = None
-        callback_steps = None
 else:
     prior_pipeline = None
     decoder_pipeline = None
@@ -96,40 +82,33 @@ def generate(
     decoder_guidance_scale: float = 0.0,
     batch_size_per_prompt: int = 2,
     number_of_images_per_prompt: int = 1,  # New parameter
-)
+):
     images = []  # Initialize an empty list to collect generated images
     original_seed = seed  # Store the original seed value
     for i in range(number_of_images_per_prompt):
         if i > 0:  # Update seed for subsequent iterations
             seed = random.randint(0, MAX_SEED)
         generator = torch.Generator().manual_seed(seed)
+
         prior_output = prior_pipeline(
             prompt=prompt,
             height=height,
             width=width,
-
-            timesteps=DEFAULT_STAGE_C_TIMESTEPS,
+            generator=generator,
             negative_prompt=negative_prompt,
             guidance_scale=prior_guidance_scale,
             num_images_per_prompt=batch_size_per_prompt,
-
-            callback=callback_prior,
-            callback_steps=callback_steps
+            num_inference_steps=prior_num_inference_steps
         )
 
-        if PREVIEW_IMAGES:
-            for _ in range(len(DEFAULT_STAGE_C_TIMESTEPS)):
-                r = next(prior_output)
-            prior_output = r
-
         decoder_output = decoder_pipeline(
             image_embeddings=prior_output.image_embeddings,
             prompt=prompt,
-            num_inference_steps= decoder_num_inference_steps,
-            guidance_scale=decoder_guidance_scale,
             negative_prompt=negative_prompt,
-
+            guidance_scale=decoder_guidance_scale,
             output_type="pil",
+            generator=generator,
+            num_inference_steps=decoder_num_inference_steps
         ).images
 
         # Append generated images to the images list
@@ -233,7 +212,7 @@ with gr.Blocks() as app:
                 minimum=1,
                 maximum=100,
                 step=1,
-                value=
+                value=30,
             )
         with gr.Column():
             decoder_num_inference_steps = gr.Slider(
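Net effect of the app.py change: generate() now drives the stock StableCascadePriorPipeline / StableCascadeDecoderPipeline call signature (num_inference_steps plus a generator) instead of the Wuerstchen-style DEFAULT_STAGE_C_TIMESTEPS list and the previewer callback. Below is a minimal, self-contained sketch of that two-stage flow, not the app itself; the checkpoint ids, dtype, prompt, resolution, and step counts are assumptions for illustration only.

# Sketch of the "main pipe" flow this commit switches to.
# Assumptions (not taken from the diff): checkpoint ids, bfloat16 dtype, prompt/step values.
import torch
from diffusers import StableCascadePriorPipeline, StableCascadeDecoderPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.bfloat16  # assumed; the app may use a different dtype

prior_pipeline = StableCascadePriorPipeline.from_pretrained(
    "stabilityai/stable-cascade-prior", torch_dtype=dtype  # assumed checkpoint id
).to(device)
decoder_pipeline = StableCascadeDecoderPipeline.from_pretrained(
    "stabilityai/stable-cascade", torch_dtype=dtype  # assumed checkpoint id
).to(device)

generator = torch.Generator().manual_seed(42)
prompt = "a photo of a cat wearing a space suit"  # placeholder prompt

# Stage C (prior): text -> image embeddings, steered by num_inference_steps and a
# generator rather than DEFAULT_STAGE_C_TIMESTEPS and a previewer callback.
prior_output = prior_pipeline(
    prompt=prompt,
    height=1024,
    width=1024,
    generator=generator,
    negative_prompt="",
    guidance_scale=4.0,
    num_images_per_prompt=2,
    num_inference_steps=30,  # example value; the commit sets a slider default of 30
)

# Stage B (decoder): image embeddings -> PIL images.
images = decoder_pipeline(
    image_embeddings=prior_output.image_embeddings,
    prompt=prompt,
    negative_prompt="",
    guidance_scale=0.0,
    output_type="pil",
    generator=generator,
    num_inference_steps=10,  # assumed decoder step count
).images

images[0].save("stable_cascade_sample.png")

Reusing the same generator for both stages keeps a run reproducible from a single seed, which mirrors how generate() derives its generator from the seed value.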
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-git+https://github.com/kashif/diffusers.git@
+git+https://github.com/kashif/diffusers.git@wuerstchen-v3
 accelerate
 safetensors
 transformers
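requirements.txt now pins diffusers to the wuerstchen-v3 branch of the kashif fork (installed with pip from git+https://github.com/kashif/diffusers.git@wuerstchen-v3). A quick, optional sanity check that the installed build exposes the names app.py imports; this snippet is an assumption about a reasonable verification step, not part of the commit:

# Optional sanity check: the branch install must provide the Stable Cascade
# pipelines and the numpy_to_pil helper that app.py imports.
import diffusers
from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline
from diffusers.utils import numpy_to_pil

print("diffusers", diffusers.__version__, "->",
      StableCascadePriorPipeline.__name__, StableCascadeDecoderPipeline.__name__)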