patrickvonplaten committed · ef9dee0
Parent(s): 1aabc2d

up
app.py CHANGED

@@ -96,8 +96,6 @@ def txt_to_img(
     neg_prompt,
     guidance,
     steps,
-    width,
-    height,
     generator,
 ):
     pipe = MODELS[model_name].pipe_t2i
@@ -111,15 +109,13 @@ def txt_to_img(
         negative_prompt=neg_prompt,
         num_inference_steps=int(steps),
         guidance_scale=guidance,
-        width=width,
-        height=height,
         generator=generator,
         output_type="latent",
     ).images
 
     with torch.no_grad():
         low_res_image = pipe.decode_latents(low_res_latents)
-        low_res_image = pipe.numpy_to_pil(low_res_image)
+        low_res_image = pipe.numpy_to_pil(low_res_image)
 
     up_res_image = UPSCALER(
         prompt=prompt,
@@ -128,7 +124,7 @@ def txt_to_img(
         num_inference_steps=20,
         guidance_scale=0,
         generator=generator,
-    ).images
+    ).images
 
     pipe.to("cpu")
     torch.cuda.empty_cache()
@@ -225,14 +221,6 @@ with gr.Blocks(css="style.css") as demo:
             step=1,
         )
 
-        with gr.Row():
-            width = gr.Slider(
-                label="Width", value=512, minimum=64, maximum=1024, step=8
-            )
-            height = gr.Slider(
-                label="Height", value=512, minimum=64, maximum=1024, step=8
-            )
-
         seed = gr.Slider(
             0, 2147483647, label="Seed (0 = random)", value=0, step=1
         )
@@ -242,8 +230,6 @@ with gr.Blocks(css="style.css") as demo:
         prompt,
         guidance,
         steps,
-        width,
-        height,
         seed,
         neg_prompt,
     ]
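For context, the patched txt_to_img generates at the base pipeline's default resolution, keeps the result as latents (output_type="latent"), decodes a low-resolution preview, and hands the latents to UPSCALER. Below is a minimal sketch of that two-stage flow outside the Space, assuming diffusers' StableDiffusionPipeline and StableDiffusionLatentUpscalePipeline; the model IDs, the prompt, and the stand-ins for MODELS[model_name].pipe_t2i and UPSCALER are illustrative assumptions, not the Space's actual configuration.

import torch
from diffusers import StableDiffusionPipeline, StableDiffusionLatentUpscalePipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Stand-in for MODELS[model_name].pipe_t2i in the Space (model ID assumed).
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=dtype
).to(device)

# Stand-in for the Space's UPSCALER global.
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=dtype
).to(device)

prompt = "a photo of a lighthouse at sunset"
generator = torch.Generator(device).manual_seed(0)

# Stage 1: run the base pipeline but keep the result in latent space,
# at the pipeline's default resolution (no width/height arguments).
low_res_latents = pipe(
    prompt,
    negative_prompt="",
    num_inference_steps=25,
    guidance_scale=9.0,
    generator=generator,
    output_type="latent",
).images

# Decode a low-resolution preview, mirroring the diff (decode_latents and
# numpy_to_pil are deprecated in newer diffusers releases).
with torch.no_grad():
    low_res_image = pipe.numpy_to_pil(pipe.decode_latents(low_res_latents))[0]

# Stage 2: feed the latents to the 2x latent upscaler with the same prompt.
up_res_image = upscaler(
    prompt=prompt,
    image=low_res_latents,
    num_inference_steps=20,
    guidance_scale=0,
    generator=generator,
).images[0]

low_res_image.save("low_res.png")
up_res_image.save("up_res.png")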
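On the UI side, the Width/Height sliders are dropped and the handler's inputs list shrinks to match the new txt_to_img signature. A minimal, hypothetical gr.Blocks sketch of that wiring follows; the layout and the generate() stub are illustrative, not the Space's actual components.

import gradio as gr

def generate(prompt, guidance, steps, seed, neg_prompt):
    # Stub standing in for the Space's inference chain (which ends in txt_to_img).
    return f"prompt={prompt!r}, guidance={guidance}, steps={steps}, seed={seed}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    neg_prompt = gr.Textbox(label="Negative prompt")
    guidance = gr.Slider(label="Guidance scale", value=7.5, minimum=0, maximum=15)
    steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
    # No Width/Height sliders after this commit; generation uses the
    # pipeline's default size and the latent upscaler doubles it.
    seed = gr.Slider(0, 2147483647, label="Seed (0 = random)", value=0, step=1)
    output = gr.Textbox(label="Result")

    inputs = [prompt, guidance, steps, seed, neg_prompt]
    gr.Button("Generate").click(generate, inputs=inputs, outputs=output)

if __name__ == "__main__":
    demo.launch()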