Update app.py
app.py CHANGED
@@ -10,7 +10,14 @@ from insightface.utils import face_align
 import gradio as gr
 import cv2
 
-base_model_path = "SG161222/Realistic_Vision_V4.0_noVAE"
+base_model_paths = {
+    "Realistic_Vision_V4.0_noVAE": "SG161222/Realistic_Vision_V4.0_noVAE",
+    "Realistic_Vision_V6.0_B1_noVAE": "SG161222/Realistic_Vision_V6.0_B1_noVAE",
+    "Deliberate": "Yntec/Deliberate",
+    "Deliberate2": "Yntec/Deliberate2",
+    "pony-diffusion": "AstraliteHeart/pony-diffusion"
+}
+
 vae_model_path = "stabilityai/sd-vae-ft-mse"
 image_encoder_path = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
 ip_ckpt = hf_hub_download(repo_id="h94/IP-Adapter-FaceID", filename="ip-adapter-faceid_sd15.bin", repo_type="model")
@@ -32,20 +39,20 @@ noise_scheduler = DDIMScheduler(
     steps_offset=1,
 )
 vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)
-pipe = StableDiffusionPipeline.from_pretrained(
-    base_model_path,
-    torch_dtype=torch.float16,
-    scheduler=noise_scheduler,
-    vae=vae,
-    feature_extractor=safety_feature_extractor,
-    safety_checker=None # <--- Disable safety checker
-).to(device)
 
-
-
+def load_model(base_model_path):
+    pipe = StableDiffusionPipeline.from_pretrained(
+        base_model_path,
+        torch_dtype=torch.float16,
+        scheduler=noise_scheduler,
+        vae=vae,
+        feature_extractor=safety_feature_extractor,
+        safety_checker=None # <--- Disable safety checker
+    ).to(device)
+    return pipe
 
-ip_model = IPAdapterFaceID(pipe, ip_ckpt, device)
-ip_model_plus = IPAdapterFaceIDPlus(pipe, image_encoder_path, ip_plus_ckpt, device)
+ip_model = None
+ip_model_plus = None
 
 app = FaceAnalysis(name="buffalo_l", providers=['CPUExecutionProvider'])
 app.prepare(ctx_id=0, det_size=(640, 640))
@@ -53,7 +60,13 @@ app.prepare(ctx_id=0, det_size=(640, 640))
 cv2.setNumThreads(1)
 
 @spaces.GPU(enable_queue=True)
-def generate_image(images, prompt, negative_prompt, preserve_face_structure, face_strength, likeness_strength, nfaa_negative_prompt, progress=gr.Progress(track_tqdm=True)):
+def generate_image(images, prompt, negative_prompt, preserve_face_structure, face_strength, likeness_strength, nfaa_negative_prompt, base_model, progress=gr.Progress(track_tqdm=True)):
+    global ip_model, ip_model_plus
+    base_model_path = base_model_paths[base_model]
+    pipe = load_model(base_model_path)
+    ip_model = IPAdapterFaceID(pipe, ip_ckpt, device)
+    ip_model_plus = IPAdapterFaceIDPlus(pipe, image_encoder_path, ip_plus_ckpt, device)
+
     faceid_all_embeds = []
     first_iteration = True
     for image in images:
@@ -95,10 +108,12 @@ def swap_to_gallery(images):
 
 def remove_back_to_files():
     return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
+
 css = '''
 h1{margin-bottom: 0 !important}
 footer{display:none !important}
 '''
+
 with gr.Blocks(css=css) as demo:
     gr.Markdown("")
     gr.Markdown("")
@@ -116,6 +131,7 @@ with gr.Blocks(css=css) as demo:
                                 placeholder="A photo of a [man/woman/person]...")
             negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="low quality")
             style = gr.Radio(label="Generation type", info="For stylized try prompts like 'a watercolor painting of a woman'", choices=["Photorealistic", "Stylized"], value="Photorealistic")
+            base_model = gr.Dropdown(label="Base Model", choices=list(base_model_paths.keys()), value="Realistic_Vision_V4.0_noVAE")
             submit = gr.Button("Submit")
             with gr.Accordion(open=False, label="Advanced Options"):
                 preserve = gr.Checkbox(label="Preserve Face Structure", info="Higher quality, less versatility (the face structure of your first photo will be preserved). Unchecking this will use the v1 model.", value=True)
@@ -130,7 +146,7 @@ with gr.Blocks(css=css) as demo:
     files.upload(fn=swap_to_gallery, inputs=files, outputs=[uploaded_files, clear_button, files])
    remove_and_reupload.click(fn=remove_back_to_files, outputs=[uploaded_files, clear_button, files])
     submit.click(fn=generate_image,
-                 inputs=[files,prompt,negative_prompt,preserve, face_strength, likeness_strength, nfaa_negative_prompts],
+                 inputs=[files,prompt,negative_prompt,preserve, face_strength, likeness_strength, nfaa_negative_prompts, base_model],
                  outputs=gallery)
 
     gr.Markdown("")
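
For readers following the change: the commit replaces the single hard-coded base_model_path with a base_model_paths registry, wraps pipeline construction in load_model(), and defers creation of the IP-Adapter models to the @spaces.GPU-decorated generate_image, which now receives the new Base Model dropdown's selection. Below is a minimal, self-contained sketch of that wiring; the diffusers/IP-Adapter setup is stubbed out (the _pipe_cache dict and placeholder strings are hypothetical, not part of the Space's code) so the Gradio plumbing stays runnable on its own.

import gradio as gr

# Registry of selectable base models, mirroring base_model_paths in the diff.
base_model_paths = {
    "Realistic_Vision_V4.0_noVAE": "SG161222/Realistic_Vision_V4.0_noVAE",
    "Deliberate": "Yntec/Deliberate",
}

_pipe_cache = {}  # hypothetical cache so re-selecting a model avoids a reload

def load_model(repo_id):
    # Stand-in for StableDiffusionPipeline.from_pretrained(repo_id, ...).to(device);
    # here we only record which checkpoint would have been loaded.
    if repo_id not in _pipe_cache:
        _pipe_cache[repo_id] = f"<pipeline for {repo_id}>"
    return _pipe_cache[repo_id]

def generate_image(prompt, base_model):
    # Resolve the dropdown's display name to a Hub repo id, then load on demand,
    # as the updated generate_image in the diff does.
    pipe = load_model(base_model_paths[base_model])
    return f"would run {pipe} on prompt: {prompt!r}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    base_model = gr.Dropdown(label="Base Model",
                             choices=list(base_model_paths.keys()),
                             value="Realistic_Vision_V4.0_noVAE")
    result = gr.Textbox(label="Result")
    gr.Button("Submit").click(fn=generate_image,
                              inputs=[prompt, base_model],
                              outputs=result)

if __name__ == "__main__":
    demo.launch()

Note that, as written in the diff, every call to generate_image rebuilds the pipeline for the selected model; a small cache like the hypothetical _pipe_cache above is one way to trade memory for per-request load time if that becomes a bottleneck.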