k-l-lambda committed
Commit b572397
1 Parent(s): c5ba729

app.py: removed examples.
app.py CHANGED
@@ -121,28 +121,28 @@ def get_example ():
 
 def run_for_examples_with_key (novita_key):
     def run_for_examples (face_file, pose_file, prompt, style, negative_prompt):
-        print('run_for_examples:', face_file)
-        return generate_image(
-            ...
-        )
+        print('run_for_examples:', novita_key, face_file)
+        #return generate_image(
+        #    novita_key,
+        #    face_file,
+        #    pose_file,
+        #    prompt,
+        #    negative_prompt,
+        #    style,
+        #    20, # num_steps
+        #    0.8, # identitynet_strength_ratio
+        #    0.8, # adapter_strength_ratio
+        #    0.4, # pose_strength
+        #    0.3, # canny_strength
+        #    0.5, # depth_strength
+        #    ['pose', 'canny'], # controlnet_selection
+        #    5.0, # guidance_scale
+        #    42, # seed
+        #    'Euler a', # scheduler
+        #    #False, # enable_LCM
+        #    True, # enable_Face_Region
+        #)
+        return None, gr.update(visible=True)
     return run_for_examples
 
 
@@ -163,7 +163,7 @@ def generate_image (
     guidance_scale,
     seed,
     scheduler,
-    enable_LCM,
+    #enable_LCM,
     enhance_face_region,
     progress=gr.Progress(track_tqdm=True),
 ):
@@ -299,10 +299,10 @@ with gr.Blocks(css=css) as demo:
     )
 
     submit = gr.Button('Submit', variant='primary')
-    enable_LCM = gr.Checkbox(
-        ...
-    )
+    #enable_LCM = gr.Checkbox(
+    #    label='Enable Fast Inference with LCM', value=enable_lcm_arg,
+    #    info='LCM speeds up the inference step, the trade-off is the quality of the generated image. It performs better with portrait face images rather than distant faces',
+    #)
     style = gr.Dropdown(
         label='Style template',
         choices=STYLE_NAMES,
@@ -430,27 +430,27 @@ with gr.Blocks(css=css) as demo:
             guidance_scale,
             seed,
             scheduler,
-            enable_LCM,
+            #enable_LCM,
             enhance_face_region,
         ],
         outputs=[gallery, usage_tips],
     )
 
-    enable_LCM.input(
-        ...
-    )
-    gr.Examples(
-        ...
-    )
+    #enable_LCM.input(
+    #    fn=toggle_lcm_ui,
+    #    inputs=[enable_LCM],
+    #    outputs=[num_steps, guidance_scale],
+    #    queue=False,
+    #)
+
+    #gr.Examples(
+    #    examples=get_example(),
+    #    inputs=[face_file, pose_file, prompt, style, negative_prompt],
+    #    fn=run_for_examples_with_key(novita_key),
+    #    run_on_click=True,
+    #    outputs=[gallery, usage_tips],
+    #    cache_examples=True,
+    #)
 
     gr.Markdown(article)
 