Update app.py
app.py CHANGED
@@ -470,7 +470,7 @@ def get_example():
                 "./assets/examples/id_customization/chenhao/image_1.png",
                 "./assets/examples/id_customization/chenhao/image_2.png",
             ], # Input image paths
-            "[[faceid]] [[img0]]
+            "[[faceid]] [[img0]] detailed face, A woman with dark hair styled in an intricate updo, wearing a traditional orange and black outfit with elaborate gold embroidery. She has an elegant, poised expression, standing against a serene outdoor setting with classical architecture [[img1]] A young Asian woman with long dark hair and brown eyes smiles at the camera. She wears a red tank top with white flowers and green leaves. The background is blurred, with white and blue tones visible. The image has a slightly grainy quality. [[img2]] A young Asian woman in traditional attire stands against a brown background. She wears a white dress adorned with purple and green floral patterns. Her hair is styled in a bun, and she holds a small white lace umbrella with a gold handle. The image captures her elegant appearance and cultural dress. [[img3]] A woman in traditional Asian attire stands in front of a blurred building. She wears a green robe with floral designs and a black hat with lace. A man in a red robe and black hat stands behind her. The scene appears to be set in an Asian country.",
             NEGATIVE_PROMPT,
             50, # num_steps
             4.0, # guidance_scale
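The new example string follows the prompt convention visible in the diff: a task token ([[faceid]]) followed by [[img0]] … [[img3]] markers, each introducing the caption of one reference image. As a rough illustration only, such a prompt could be assembled from a list of captions; the helper below is hypothetical and not part of app.py:

    # Hypothetical helper, not from app.py: joins per-image captions into the
    # "[[faceid]] [[img0]] <caption 0> [[img1]] <caption 1> ..." format used above.
    def build_faceid_prompt(captions):
        parts = ["[[faceid]]"]
        for i, caption in enumerate(captions):
            parts.append(f"[[img{i}]] {caption.strip()}")
        return " ".join(parts)

    prompt = build_faceid_prompt([
        "detailed face, A woman with dark hair styled in an intricate updo ...",
        "A young Asian woman with long dark hair and brown eyes smiles at the camera ...",
    ])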
@@ -512,7 +512,7 @@ def get_example():
             "Semantic to Image", # Example name - new column
             "assets/examples/semantic_map/dragon_birds_woman.webp", # Preview column
             ["assets/examples/semantic_map/dragon_birds_woman.webp"], # Input image path
-            "[[semanticmap2image]] <#00ffff Cyan mask:
+            "[[semanticmap2image]] <#00ffff Cyan mask: dragon> <#ff0000 yellow mask: bird> <#800080 purple mask: woman> A woman in a red dress with gold floral patterns stands in a traditional Japanese-style building. She has black hair and wears a gold choker and earrings. Behind her, a large orange and white dragon coils around the structure. Two white birds fly near her. The building features paper windows and a wooden roof with lanterns. The scene blends traditional Japanese architecture with fantastical elements, creating a mystical atmosphere.",
             NEGATIVE_PROMPT,
             50, # num_steps
             4.0, # guidance_scale
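The semantic-map example uses a similar convention: the [[semanticmap2image]] token, one <#RRGGBB color mask: object> tag per mask color, then the scene caption. A sketch of composing that string; the helper name and its argument layout are assumptions for illustration, not code from app.py:

    # Hypothetical helper, not from app.py: builds a semantic-map prompt in the
    # "<#hex color mask: object>" format shown in the example above.
    def build_semantic_prompt(mask_entries, caption):
        tags = [f"<{hex_code} {color} mask: {obj}>" for hex_code, color, obj in mask_entries]
        return " ".join(["[[semanticmap2image]]", *tags, caption])

    prompt = build_semantic_prompt(
        [("#00ffff", "Cyan", "dragon"), ("#ff0000", "yellow", "bird"), ("#800080", "purple", "woman")],
        "A woman in a red dress with gold floral patterns stands in a traditional Japanese-style building ...",
    )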
@@ -795,7 +795,7 @@ if __name__ == "__main__":
         - For FaceID tasks:
             + Use 3 or 4 images if single input image does not give satisfactory results.
             + All images will be resized and center cropped to the input height and width. You should choose height and width so that faces in input images won't be cropped.
-            + Model works best with close-up portrait (input and output) images.
+            + Model works best with close-up portrait (input and output) images. For example add some keywords such as `detailed face`, `closeup portrait`
             + If the model does not conform your text prompt, try using shorter caption for source image(s).
             + If you have non-human subjects and does not get satisfactory results, try "copying" part of caption of source images where it describes the properties of the subject e.g., a monster with red eyes, sharp teeth, etc.

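The tip about choosing height and width refers to the resize-then-center-crop preprocessing described in the text above. A minimal sketch of that behavior, assuming a PIL-style cover-resize followed by a centered crop; this illustrates the described preprocessing and is not the actual code in app.py:

    # Illustration of resize + center crop as described in the tips above;
    # faces near the image border can fall outside the centered crop window.
    from PIL import Image

    def resize_center_crop(img: Image.Image, width: int, height: int) -> Image.Image:
        scale = max(width / img.width, height / img.height)  # scale so the image covers the target box
        img = img.resize((round(img.width * scale), round(img.height * scale)))
        left = (img.width - width) // 2
        top = (img.height - height) // 2
        return img.crop((left, top, left + width, top + height))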
@@ -834,10 +834,6 @@ if __name__ == "__main__":
                 height="auto",
                 object_fit="contain"
             )
-
-            with gr.Row():
-                delete_button = gr.Button("Delete Selected Images")
-                delete_all_button = gr.Button("Delete All Images")

             task_type = gr.Dropdown(
                 choices=list(TASK2SPECIAL_TOKENS.keys()),
@@ -950,52 +946,6 @@ if __name__ == "__main__":
            selected_indices.append(evt.index)
            return selected_indices

-        # Connect gallery upload
-        gallery.upload(
-            fn=update_gallery_state,
-            inputs=[gallery, images_state],
-            outputs=[images_state, gallery, preview_gallery],
-            show_progress="full"
-        ).then(
-            fn=update_height_width,
-            inputs=[images_state],
-            outputs=[height, width]
-        ).then(
-            fn=update_denoise_checkboxes,
-            inputs=[images_state, task_type, azimuth, elevation, distance],
-            outputs=[denoise_mask_checkbox]
-        )
-
-        # Update delete buttons connections
-        delete_button.click(
-            fn=delete_selected_images,
-            inputs=[selected_indices_state, images_state, gallery],
-            outputs=[images_state, gallery, preview_gallery, selected_indices_state]
-        ).then(
-            fn=update_height_width,
-            inputs=[images_state],
-            outputs=[height, width]
-        ).then(
-            fn=update_denoise_checkboxes,
-            inputs=[images_state, task_type, azimuth, elevation, distance],
-            outputs=[denoise_mask_checkbox]
-        )
-
-        delete_all_button.click(
-            fn=delete_all_images,
-            inputs=[],
-            outputs=[images_state, gallery, preview_gallery, selected_indices_state]
-        ).then(
-            fn=update_denoise_checkboxes,
-            inputs=[images_state, task_type, azimuth, elevation, distance],
-            outputs=[denoise_mask_checkbox]
-        ).then(
-            fn=update_height_width,
-            inputs=[images_state],
-            outputs=[height, width]
-        )
-
-
        task_type.change(
            fn=update_denoise_checkboxes,
            inputs=[images_state, task_type, azimuth, elevation, distance],
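The removed handlers relied on Gradio's event chaining: each .then() registers a follow-up callback that runs after the previous one finishes, which is how a single gallery upload also refreshed height/width and the denoise-mask checkboxes. A minimal, self-contained sketch of that pattern; the component names and callbacks here are illustrative, not the ones in app.py:

    import gradio as gr

    with gr.Blocks() as demo:
        gallery = gr.Gallery(label="inputs")
        status = gr.Textbox(label="status")

        def on_upload(files):
            # first callback in the chain
            return f"received {len(files)} file(s)"

        def after_upload(msg):
            # runs only after on_upload has finished
            return msg + " (post-processed)"

        gallery.upload(fn=on_upload, inputs=[gallery], outputs=[status]).then(
            fn=after_upload, inputs=[status], outputs=[status]
        )

    # demo.launch()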