Spaces:
Sleeping
Sleeping
lichorosario
committed on
Commit
•
1af9d99
1
Parent(s):
f209032
feat: Agregar función de refinamiento de imágenes en app.py
Browse files
Se ha agregado una nueva función llamada `refine_image` en el archivo app.py. Esta función permite aplicar un refinamiento a las imágenes generadas utilizando un modelo de refinería. Los parámetros de refinamiento, como la fuerza, el número de pasos de inferencia y la escala de guía, se pueden ajustar según las necesidades del usuario. Esta mejora proporciona mayor flexibilidad y control sobre el proceso de generación de imágenes.
app.py
CHANGED
@@ -5,6 +5,7 @@ import json
|
|
5 |
from gradio_client import Client, handle_file
|
6 |
from gradio_imageslider import ImageSlider
|
7 |
from PIL import Image
|
|
|
8 |
|
9 |
with open('loras.json', 'r') as f:
|
10 |
loras = json.load(f)
|
@@ -168,6 +169,21 @@ def upscale_image(image, resolution, num_inference_steps, strength, hdr, guidanc
|
|
168 |
return result
|
169 |
|
170 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
171 |
|
172 |
|
173 |
css="""
|
@@ -261,6 +277,50 @@ with gr.Blocks(css=css) as demo:
|
|
261 |
value=1.0
|
262 |
)
|
263 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
264 |
with gr.Column(scale=1):
|
265 |
gallery = gr.Gallery(
|
266 |
[(item["image"], item["title"]) for item in loras],
|
@@ -282,7 +342,7 @@ with gr.Blocks(css=css) as demo:
|
|
282 |
with gr.Column():
|
283 |
output_slider = ImageSlider(label="Before / After", type="filepath", show_download_button=False)
|
284 |
|
285 |
-
with gr.Accordion("
|
286 |
upscale_reduce_factor = gr.Slider(minimum=1, maximum=10, step=1, label="Reduce Factor", info="1/n")
|
287 |
upscale_resolution = gr.Slider(minimum=128, maximum=2048, value=1024, step=128, label="Resolution", info="Image width")
|
288 |
upscale_num_inference_steps = gr.Slider(minimum=1, maximum=150, value=50, step=1, label="Number of Inference Steps")
|
@@ -303,7 +363,11 @@ with gr.Blocks(css=css) as demo:
|
|
303 |
fn=infer,
|
304 |
inputs=[selected_index, prompt_in, style_prompt_in, inf_steps, guidance_scale, width, height, seed, lora_weight],
|
305 |
outputs=[generated_image, last_used_seed, used_prompt]
|
|
|
|
|
|
|
306 |
)
|
|
|
307 |
cancel_btn.click(
|
308 |
fn=cancel_infer,
|
309 |
outputs=[]
|
|
|
5 |
from gradio_client import Client, handle_file
|
6 |
from gradio_imageslider import ImageSlider
|
7 |
from PIL import Image
|
8 |
+
client = InferenceClient()
|
9 |
|
10 |
with open('loras.json', 'r') as f:
|
11 |
loras = json.load(f)
|
|
|
169 |
return result
|
170 |
|
171 |
|
172 |
+
def refine_image(apply_refiner, image, model, prompt, negative_prompt, num_inference_steps, guidance_scale, seed, strength):
    """Optionally pass a generated image through an image-to-image refiner.

    Returns a two-element [before, after] list suitable for an ImageSlider.
    When apply_refiner is False the input image fills both slots unchanged.

    Relies on the module-level `client` (InferenceClient) for the actual
    image-to-image call; `model` selects the refiner checkpoint.
    """
    if not apply_refiner:
        # Refinement disabled: show the same image on both slider sides.
        return [image, image]

    refined = client.image_to_image(
        image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        seed=seed,
        model=model,
        strength=strength,
    )
    return [image, refined]
|
187 |
|
188 |
|
189 |
css="""
|
|
|
277 |
value=1.0
|
278 |
)
|
279 |
|
280 |
+
with gr.Group():
|
281 |
+
apply_refiner = gr.Checkbox(label="Apply refiner", value=False)
|
282 |
+
with gr.Accordion("Refiner params", open=False) as refiner_params:
|
283 |
+
refiner_prompt = gr.Textbox(lines=3, label="Prompt")
|
284 |
+
refiner_negative_prompt = gr.Textbox(lines=3, label="Negative Prompt")
|
285 |
+
refiner_strength = gr.Slider(
|
286 |
+
label="Strength",
|
287 |
+
minimum=0,
|
288 |
+
maximum=300,
|
289 |
+
step=0.01,
|
290 |
+
value=1
|
291 |
+
)
|
292 |
+
refiner_num_inference_steps = gr.Slider(
|
293 |
+
label="Inference steps",
|
294 |
+
minimum=3,
|
295 |
+
maximum=300,
|
296 |
+
step=1,
|
297 |
+
value=25
|
298 |
+
)
|
299 |
+
refiner_guidance_scale = gr.Slider(
|
300 |
+
label="Guidance scale",
|
301 |
+
minimum=0.0,
|
302 |
+
maximum=50.0,
|
303 |
+
step=0.1,
|
304 |
+
value=12
|
305 |
+
)
|
306 |
+
refiner_seed = gr.Slider(
|
307 |
+
label="Seed",
|
308 |
+
info="-1 denotes a random seed",
|
309 |
+
minimum=-1,
|
310 |
+
maximum=423538377342,
|
311 |
+
step=1,
|
312 |
+
value=-1
|
313 |
+
)
|
314 |
+
refiner_model = gr.Textbox(label="Model", value="stabilityai/stable-diffusion-xl-refiner-1.0")
|
315 |
+
|
316 |
+
apply_refiner.change(
|
317 |
+
fn=lambda x: gr.update(visible=x),
|
318 |
+
inputs=apply_refiner,
|
319 |
+
outputs=refiner_params,
|
320 |
+
queue=False,
|
321 |
+
api_name=False,
|
322 |
+
)
|
323 |
+
|
324 |
with gr.Column(scale=1):
|
325 |
gallery = gr.Gallery(
|
326 |
[(item["image"], item["title"]) for item in loras],
|
|
|
342 |
with gr.Column():
|
343 |
output_slider = ImageSlider(label="Before / After", type="filepath", show_download_button=False)
|
344 |
|
345 |
+
with gr.Accordion("Enhacer params", open=False):
|
346 |
upscale_reduce_factor = gr.Slider(minimum=1, maximum=10, step=1, label="Reduce Factor", info="1/n")
|
347 |
upscale_resolution = gr.Slider(minimum=128, maximum=2048, value=1024, step=128, label="Resolution", info="Image width")
|
348 |
upscale_num_inference_steps = gr.Slider(minimum=1, maximum=150, value=50, step=1, label="Number of Inference Steps")
|
|
|
363 |
fn=infer,
|
364 |
inputs=[selected_index, prompt_in, style_prompt_in, inf_steps, guidance_scale, width, height, seed, lora_weight],
|
365 |
outputs=[generated_image, last_used_seed, used_prompt]
|
366 |
+
).then(refine_image,
|
367 |
+
[apply_refiner, generated_image, refiner_model, refiner_prompt, refiner_negative_prompt, refiner_num_inference_steps, refiner_guidance_scale, refiner_seed, refiner_strength],
|
368 |
+
generated_image
|
369 |
)
|
370 |
+
|
371 |
cancel_btn.click(
|
372 |
fn=cancel_infer,
|
373 |
outputs=[]
|