Update app.py
app.py CHANGED
@@ -49,6 +49,7 @@ def tpu_inference_api(
     do_img2img: bool = False,
     init_image: Optional[str] = None,
     image2image_strength: float = 0,
+    inference_steps = 25,
 ) -> bytes:
     url = os.getenv("TPU_INFERENCE_API")
     if(randomize_seed):
@@ -67,7 +68,8 @@ def tpu_inference_api(
         "seed": seed,
         "do_img2img": do_img2img,
         "image2image_strength": image2image_strength,
-        "init_image": init_image
+        "init_image": init_image,
+        "inference_steps": inference_steps,
     }
     response = requests.post(url, json=payload)
     if response.status_code != 200:
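
For context, an abridged sketch of the JSON payload tpu_inference_api now posts, showing the two keys this commit touches; the surrounding script is illustrative, and the real dict also carries prompt/size/guidance fields that fall outside these context lines:

import os
import requests

payload = {
    "seed": 42,
    "do_img2img": False,
    "image2image_strength": 0,
    "init_image": None,         # now always included in the payload
    "inference_steps": 25,      # new field forwarded to the TPU backend
}
response = requests.post(os.getenv("TPU_INFERENCE_API"), json=payload)
if response.status_code != 200:
    raise RuntimeError(f"TPU backend returned {response.status_code}")
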
@@ -155,7 +157,7 @@ def run(prompt, radio="model-v2", preset=PRESET_Q, h=1216, w=832, negative_promp
     return zero_inference_api(prompt, radio, preset, h, w, negative_prompt, guidance_scale, randomize_seed, seed, do_img2img, init_image, image2image_strength)
 
 @spaces.GPU
-def zero_inference_api(prompt, radio="model-v2", preset=PRESET_Q, h=1216, w=832, negative_prompt=NEGATIVE_PROMPT, guidance_scale=4.0, randomize_seed=True, seed=42, do_img2img=False, init_image=None, image2image_strength=0, progress=gr.Progress(track_tqdm=True)):
+def zero_inference_api(prompt, radio="model-v2", preset=PRESET_Q, h=1216, w=832, negative_prompt=NEGATIVE_PROMPT, guidance_scale=4.0, randomize_seed=True, seed=42, do_img2img=False, init_image=None, image2image_strength=0, inference_steps=25, progress=gr.Progress(track_tqdm=True)):
     prompt = prompt.strip() + ", " + preset.strip()
     negative_prompt = negative_prompt.strip() if negative_prompt and negative_prompt.strip() else None
 
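
Worth noting from the unchanged context line above: run() still calls zero_inference_api without forwarding inference_steps, so the ZeroGPU path appears to fall back to the new default of 25 regardless of the slider. A forwarding call would presumably read (hypothetical, not part of this commit):

return zero_inference_api(prompt, radio, preset, h, w, negative_prompt, guidance_scale, randomize_seed, seed, do_img2img, init_image, image2image_strength, inference_steps)
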
@@ -167,24 +169,26 @@ def zero_inference_api(prompt, radio="model-v2", preset=PRESET_Q, h=1216, w=832,
         guidance_scale = 0.0
 
     generator = torch.Generator(device="cuda").manual_seed(seed)
+    if inference_steps > 50:
+        inference_steps = 50
 
     if not do_img2img:
         if radio == "model-v2":
-            image = pipe(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=
+            image = pipe(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=inference_steps).images[0]
         else:
-            image = pipe2(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=
+            image = pipe2(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=inference_steps).images[0]
     else:
         init_image = Image.fromarray(init_image)
         if radio == "model-v2":
-            image = pipe_img2img(prompt, image=init_image, strength=image2image_strength, negative_prompt=negative_prompt, guidance_scale=guidance_scale, generator=generator, num_inference_steps=
+            image = pipe_img2img(prompt, image=init_image, strength=image2image_strength, negative_prompt=negative_prompt, guidance_scale=guidance_scale, generator=generator, num_inference_steps=inference_steps).images[0]
         else:
-            image = pipe2_img2img(prompt, image=init_image, strength=image2image_strength, negative_prompt=negative_prompt, guidance_scale=guidance_scale, generator=generator, num_inference_steps=
+            image = pipe2_img2img(prompt, image=init_image, strength=image2image_strength, negative_prompt=negative_prompt, guidance_scale=guidance_scale, generator=generator, num_inference_steps=inference_steps).images[0]
 
     naifix = prompt[:40].replace(":", "_").replace("\\", "_").replace("/", "_") + f" s-{seed}-"
     with tempfile.NamedTemporaryFile(prefix=naifix, suffix=".png", delete=False) as tmpfile:
         parameters = {
             "prompt": prompt,
-            "steps":
+            "steps": inference_steps,
             "height": h,
             "width": w,
             "scale": guidance_scale,
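
The two added lines cap user-supplied steps at 50 before any pipeline runs. An equivalent clamp that also enforces the UI slider's lower bound of 4 could read (an alternative phrasing, not the committed code):

inference_steps = max(4, min(int(inference_steps), 50))
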
@@ -247,6 +251,7 @@ with gr.Blocks(theme=theme) as demo:
 
         preset = gr.Textbox(show_label=False, scale=5, value=PRESET_Q, info="Quality presets")
         radio = gr.Radio(["model-v2-beta", "model-v2"], value="model-v2", label = "Choose the inference model")
+        inference_steps = gr.Slider(label="Inference Steps", value=25, minimum=4, maximum=50, step=1)
         with gr.Row():
             height = gr.Slider(label="Height", value=1216, minimum=512, maximum=2560, step=64)
             width = gr.Slider(label="Width", value=832, minimum=512, maximum=2560, step=64)
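
The new slider's maximum of 50 matches the server-side cap above, so the UI and the guard agree. A self-contained sketch of the same slider-to-callback wiring, using placeholder names rather than this Space's components:

import gradio as gr

def describe(steps):
    # Echo the value a generation call would receive.
    return f"would denoise for {int(steps)} steps"

with gr.Blocks() as demo:
    steps = gr.Slider(label="Inference Steps", value=25, minimum=4, maximum=50, step=1)
    out = gr.Textbox(show_label=False)
    steps.change(fn=describe, inputs=steps, outputs=out)

demo.launch()
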
@@ -262,7 +267,7 @@ with gr.Blocks(theme=theme) as demo:
             image2image_resize = gr.Checkbox(label="Resize input image", value=False, visible=False)
             image2image_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label="Noising strength", value=0.7, visible=False)
 
-        with gr.Column():
+        with gr.Column(scale=2.5):
             output = gr.Image(type="filepath", interactive=False)
 
     gr.Examples(fn=run, examples=["mayano_top_gun_\(umamusume\), 1girl, rurudo", "sho (sho lwlw),[[[ohisashiburi]]],fukuro daizi,tianliang duohe fangdongye,[daidai ookami],year_2023, (wariza), depth of field, official_art"], inputs=prompt, outputs=[output, seed], cache_examples="lazy")
@@ -278,8 +283,9 @@ with gr.Blocks(theme=theme) as demo:
             prompt.submit
         ],
         fn=run,
-        inputs=[prompt, radio, preset, height, width, negative_prompt, guidance_scale, randomize_seed, seed, tpu_inference, do_img2img, init_image, image2image_resize, image2image_strength],
+        inputs=[prompt, radio, preset, height, width, negative_prompt, guidance_scale, randomize_seed, seed, tpu_inference, do_img2img, init_image, image2image_resize, image2image_strength, inference_steps],
         outputs=[output, seed],
+        concurrency_limit=1,
     )
 if __name__ == "__main__":
     demo.launch(share=True)
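
concurrency_limit=1 on the event listener serializes generation so only one request occupies the GPU at a time. If my reading of the Gradio 4.x queue API is right, a global default would achieve the same for every event (an alternative, not what this commit does):

demo.queue(default_concurrency_limit=1)
demo.launch(share=True)
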