Spaces: Running on Zero
Commit • 41238f8
Parent(s): 289ee2f

Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ from ip_adapter.ip_adapter_faceid import IPAdapterFaceID
 from huggingface_hub import hf_hub_download
 from insightface.app import FaceAnalysis
 import gradio as gr
+import cv2
 
 base_model_path = "SG161222/Realistic_Vision_V4.0_noVAE"
 vae_model_path = "stabilityai/sd-vae-ft-mse"
@@ -34,22 +35,28 @@ pipe = StableDiffusionPipeline.from_pretrained(
 ip_model = IPAdapterFaceID(pipe, ip_ckpt, device)
 
 @spaces.GPU
-def generate_image(
+def generate_image(images, prompt, negative_prompt):
     pipe.to(device)
-
     app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
     app.prepare(ctx_id=0, det_size=(640, 640))
-
-
+
+    faceid_all_embeds = []
+    for image in images:
+        face = cv2.imread(image)
+        faces = app.get(face)
+        faceid_embed = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
+        faceid_all_embeds.append(faceid_embed)
+
+    average_embedding = torch.mean(torch.stack(faceid_all_embeds, dim=0), dim=0)
 
     image = ip_model.generate(
-        prompt=prompt, negative_prompt=negative_prompt, faceid_embeds=
+        prompt=prompt, negative_prompt=negative_prompt, faceid_embeds=average_embedding, width=512, height=512, num_inference_steps=30
     )
     print(image)
     return image
 
 demo = gr.Interface(fn=generate_image,
-                    inputs=[gr.
+                    inputs=[gr.Files(label="Drag 1 or more photos of your face", file_types="image"),gr.Textbox(label="Prompt"), gr.Textbox(label="Negative Prompt")],
                     outputs=[gr.Gallery(label="Generated Image")],
                     title="IP-Adapter-FaceID demo",
                     description="Demo for the [h94/IP-Adapter-FaceID model](https://huggingface.co/h94/IP-Adapter-FaceID)"
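
For reference, the embedding-averaging step this commit introduces can be exercised on its own. The sketch below is a minimal standalone version, assuming insightface, opencv-python, and torch are installed; the image paths are placeholders, not files from the Space.

```python
# Minimal, standalone sketch of the averaging step added in this commit.
# Assumptions: insightface, opencv-python and torch are installed, and
# face1.jpg / face2.jpg are placeholder photos of the same person.
import cv2
import torch
from insightface.app import FaceAnalysis

app = FaceAnalysis(name="buffalo_l",
                   providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
app.prepare(ctx_id=0, det_size=(640, 640))

image_paths = ["face1.jpg", "face2.jpg"]  # placeholders

faceid_all_embeds = []
for path in image_paths:
    face = cv2.imread(path)   # BGR image as a numpy array
    faces = app.get(face)     # detected faces in the image
    embed = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
    faceid_all_embeds.append(embed)

# One identity embedding averaged over all uploads; this is the tensor the
# updated generate_image() passes to ip_model.generate() as faceid_embeds.
average_embedding = torch.mean(torch.stack(faceid_all_embeds, dim=0), dim=0)
print(average_embedding.shape)  # (1, 512) with the buffalo_l recognition model
```

Averaging several embeddings tends to give a steadier identity signal than a single photo, and because gr.Files hands the function the uploaded files (as file paths in recent Gradio versions), cv2.imread(image) can read each upload directly.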