Commit 97ebe70
k-l-lambda committed
Parent(s): 43230a0

app.py: fixed controlnet options.

app.py CHANGED
@@ -136,7 +136,7 @@ CONTROLNET_DICT = dict(
         'preprocessor': 'canny',
     },
     lineart={
-        'model_name': 'controlnet-softedge-
+        'model_name': 'controlnet-softedge-sdxl-1.0',
         'strength': 1,
         'preprocessor': 'lineart',
     },
@@ -269,7 +269,7 @@ def generate_image (
     num_steps,
     identitynet_strength_ratio,
     adapter_strength_ratio,
-
+    controlnet_strength_1, controlnet_strength_2, controlnet_strength_3, controlnet_strength_4,
     controlnet_selection,
     guidance_scale,
     seed,
@@ -313,7 +313,7 @@ def generate_image (
         CONTROLNET_DICT['canny']['strength'],
         CONTROLNET_DICT['depth']['strength'],
         CONTROLNET_DICT['lineart']['strength'],
-    ) =
+    ) = [controlnet_strength_1, controlnet_strength_2, controlnet_strength_3, controlnet_strength_4]
 
     face_image_uploaded, ref_image_uploaded = upload_assets_with_cache(client, [face_image_path, ref_image_path])
 
@@ -474,7 +474,7 @@ with gr.Blocks(css=css) as demo:
         )
         with gr.Accordion('Controlnet'):
            controlnet_selection = gr.CheckboxGroup(
-
+                CONTROLNET_DICT.keys(), label='Controlnet', value=['pose'],
                info='Use pose for skeleton inference, canny for edge detection, and depth for depth map estimation. You can try all three to control the generation process'
            )
            pose_strength = gr.Slider(
@@ -580,12 +580,12 @@ with gr.Blocks(css=css) as demo:
         num_steps,
         identitynet_strength_ratio,
         adapter_strength_ratio,
-        [
+        #[
         pose_strength,
         canny_strength,
         depth_strength,
         lineart_strength,
-        ],
+        #],
         controlnet_selection,
         guidance_scale,
         seed,
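For readers of the second and third hunks: the four new controlnet_strength_* parameters are written straight into CONTROLNET_DICT with a parenthesized assignment-target list. The visible context begins at the 'canny' entry, so the opening parenthesis and a likely 'pose' target sit just above the hunk. A minimal standalone sketch of that pattern, assuming placeholder defaults (apply_strengths and the values below are illustrative, not the app's real code):

CONTROLNET_DICT = dict(
    pose=dict(strength=1),
    canny=dict(strength=1),
    depth=dict(strength=1),
    lineart=dict(strength=1),
)

def apply_strengths(s1, s2, s3, s4):
    # One assignment writes all four values into the shared dict,
    # mirroring the ") = [controlnet_strength_1, ...]" hunk above.
    (
        CONTROLNET_DICT['pose']['strength'],
        CONTROLNET_DICT['canny']['strength'],
        CONTROLNET_DICT['depth']['strength'],
        CONTROLNET_DICT['lineart']['strength'],
    ) = [s1, s2, s3, s4]

apply_strengths(0.5, 1.0, 0.8, 1.2)
print(CONTROLNET_DICT['depth']['strength'])  # 0.8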
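The remaining hunks cover the Gradio side: the CheckboxGroup options now come from CONTROLNET_DICT.keys(), and the commented-out '[' and '],' pass the four strength sliders as individual entries of the click handler's inputs rather than as a nested list (Gradio expects inputs to be a flat list of components). A rough, self-contained sketch of that wiring under those assumptions; the slider ranges, labels, and the stub generate_image are illustrative, not the app's actual values:

import gradio as gr

def generate_image(pose_s, canny_s, depth_s, lineart_s, controlnet_selection):
    # Stub standing in for the app's real generation call.
    return f'controlnets={controlnet_selection}, strengths={(pose_s, canny_s, depth_s, lineart_s)}'

with gr.Blocks() as demo:
    with gr.Accordion('Controlnet'):
        controlnet_selection = gr.CheckboxGroup(
            ['pose', 'canny', 'depth', 'lineart'], label='Controlnet', value=['pose'],
            info='Use pose for skeleton inference, canny for edge detection, and depth for depth map estimation'
        )
        pose_strength = gr.Slider(0, 2, value=1, label='Pose strength')
        canny_strength = gr.Slider(0, 2, value=1, label='Canny strength')
        depth_strength = gr.Slider(0, 2, value=1, label='Depth strength')
        lineart_strength = gr.Slider(0, 2, value=1, label='Lineart strength')
    output = gr.Textbox(label='Result')
    submit = gr.Button('Generate')
    # The sliders go into inputs= as separate components, not wrapped in a
    # nested list, matching the '#[' / '#],' change in the last hunk.
    submit.click(
        generate_image,
        inputs=[pose_strength, canny_strength, depth_strength,
                lineart_strength, controlnet_selection],
        outputs=output,
    )

if __name__ == '__main__':
    demo.launch()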