batoon committed
Commit 8cfe76f
1 Parent(s): f4fab1d
Files changed (1)
  1. app.py +41 -22
app.py CHANGED
@@ -1,3 +1,4 @@
+from huggingface_hub import hf_hub_download
 import math
 import random
 
@@ -22,18 +23,20 @@ STYLE_NAMES = list(styles.keys())
 DEFAULT_STYLE_NAME = "Watercolor"
 
 # download checkpoints
-from huggingface_hub import hf_hub_download
 
-hf_hub_download(repo_id="InstantX/InstantID", filename="ControlNetModel/config.json", local_dir="./checkpoints")
+hf_hub_download(repo_id="InstantX/InstantID",
+                filename="ControlNetModel/config.json", local_dir="./checkpoints")
 hf_hub_download(
     repo_id="InstantX/InstantID",
     filename="ControlNetModel/diffusion_pytorch_model.safetensors",
     local_dir="./checkpoints",
 )
-hf_hub_download(repo_id="InstantX/InstantID", filename="ip-adapter.bin", local_dir="./checkpoints")
+hf_hub_download(repo_id="InstantX/InstantID",
+                filename="ip-adapter.bin", local_dir="./checkpoints")
 
 # Load face encoder
-app = FaceAnalysis(name="antelopev2", root="./", providers=["CPUExecutionProvider"])
+app = FaceAnalysis(name="antelopev2", root="./",
+                   providers=["CPUExecutionProvider"])
 app.prepare(ctx_id=0, det_size=(640, 640))
 
 # Path to InstantID models
@@ -41,9 +44,10 @@ face_adapter = "./checkpoints/ip-adapter.bin"
 controlnet_path = "./checkpoints/ControlNetModel"
 
 # Load pipeline
-controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
+controlnet = ControlNetModel.from_pretrained(
+    controlnet_path, torch_dtype=torch.float16)
 
-base_model_path = "wangqixun/YamerMIX_v8"
+base_model_path = "SG161222/RealVisXL_V3.0"
 
 pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
     base_model_path,
@@ -133,7 +137,8 @@ def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255),
         length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
         angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
         polygon = cv2.ellipse2Poly(
-            (int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1
+            (int(np.mean(x)), int(np.mean(y))), (int(
+                length / 2), stickwidth), int(angle), 0, 360, 1
         )
         out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
     out_img = (out_img * 0.6).astype(np.uint8)
@@ -163,16 +168,20 @@ def resize_img(
     ratio = min_side / min(h, w)
     w, h = round(ratio * w), round(ratio * h)
     ratio = max_side / max(h, w)
-    input_image = input_image.resize([round(ratio * w), round(ratio * h)], mode)
-    w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
-    h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
+    input_image = input_image.resize(
+        [round(ratio * w), round(ratio * h)], mode)
+    w_resize_new = (round(ratio * w) // base_pixel_number) * \
+        base_pixel_number
+    h_resize_new = (round(ratio * h) // base_pixel_number) * \
+        base_pixel_number
     input_image = input_image.resize([w_resize_new, h_resize_new], mode)
 
     if pad_to_max_side:
         res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
         offset_x = (max_side - w_resize_new) // 2
         offset_y = (max_side - h_resize_new) // 2
-        res[offset_y : offset_y + h_resize_new, offset_x : offset_x + w_resize_new] = np.array(input_image)
+        res[offset_y: offset_y + h_resize_new, offset_x: offset_x +
+            w_resize_new] = np.array(input_image)
         input_image = Image.fromarray(res)
     return input_image
 
@@ -184,7 +193,8 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str
 
 def check_input_image(face_image):
     if face_image is None:
-        raise gr.Error("Cannot find any input face image! Please upload the face image")
+        raise gr.Error(
+            "Cannot find any input face image! Please upload the face image")
 
 
 @spaces.GPU
@@ -217,13 +227,15 @@ def generate_image(
     face_info = app.get(face_image_cv2)
 
     if len(face_info) == 0:
-        raise gr.Error("Cannot find any face in the image! Please upload another person image")
+        raise gr.Error(
+            "Cannot find any face in the image! Please upload another person image")
 
     face_info = sorted(face_info, key=lambda x: (x["bbox"][2] - x["bbox"][0]) * x["bbox"][3] - x["bbox"][1])[
         -1
     ]  # only use the maximum face
     face_emb = face_info["embedding"]
-    face_kps = draw_kps(convert_from_cv2_to_image(face_image_cv2), face_info["kps"])
+    face_kps = draw_kps(convert_from_cv2_to_image(
+        face_image_cv2), face_info["kps"])
 
     if pose_image_path is not None:
         pose_image = load_image(pose_image_path)
@@ -233,7 +245,8 @@ def generate_image(
         face_info = app.get(pose_image_cv2)
 
         if len(face_info) == 0:
-            raise gr.Error("Cannot find any face in the reference image! Please upload another person image")
+            raise gr.Error(
+                "Cannot find any face in the reference image! Please upload another person image")
 
         face_info = face_info[-1]
         face_kps = draw_kps(pose_image, face_info["kps"])
@@ -272,7 +285,7 @@ def generate_image(
     return images[0], gr.update(visible=True)
 
 
-### Description
+# Description
 title = r"""
 <h1 align="center">InstantID: Zero-shot Identity-Preserving Generation in Seconds</h1>
 """
@@ -325,10 +338,12 @@ with gr.Blocks(css=css) as demo:
     with gr.Row():
         with gr.Column():
             # upload face image
-            face_file = gr.Image(label="Upload a photo of your face", type="filepath")
+            face_file = gr.Image(
+                label="Upload a photo of your face", type="filepath")
 
             # optional: upload a reference pose image
-            pose_file = gr.Image(label="Upload a reference pose image (optional)", type="filepath")
+            pose_file = gr.Image(
+                label="Upload a reference pose image (optional)", type="filepath")
 
             # prompt
             prompt = gr.Textbox(
@@ -340,7 +355,8 @@ with gr.Blocks(css=css) as demo:
 
             submit = gr.Button("Submit", variant="primary")
 
-            style = gr.Dropdown(label="Style template", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
+            style = gr.Dropdown(label="Style template",
+                                choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
 
             # strength
             identitynet_strength_ratio = gr.Slider(
@@ -385,12 +401,15 @@ with gr.Blocks(css=css) as demo:
                     step=1,
                     value=42,
                 )
-                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                enhance_face_region = gr.Checkbox(label="Enhance non-face region", value=True)
+                randomize_seed = gr.Checkbox(
+                    label="Randomize seed", value=True)
+                enhance_face_region = gr.Checkbox(
+                    label="Enhance non-face region", value=True)
 
         with gr.Column():
             output_image = gr.Image(label="Generated Image")
-            usage_tips = gr.Markdown(label="Usage tips of InstantID", value=tips, visible=False)
+            usage_tips = gr.Markdown(
+                label="Usage tips of InstantID", value=tips, visible=False)
 
         submit.click(
             fn=remove_tips,
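
A few notes on the code this commit touches. The checkpoint downloads at the top rely on hf_hub_download returning the local path of each fetched file; with local_dir="./checkpoints" the repo-relative filename is preserved on disk, which is exactly what controlnet_path and face_adapter point at later in app.py. A minimal sketch (not part of the commit) that fetches the same three files and confirms they land where the rest of the script expects them:

# Sketch only: fetch the three InstantID checkpoint files and verify the
# on-disk layout used by app.py (./checkpoints/ControlNetModel/... and
# ./checkpoints/ip-adapter.bin).
from pathlib import Path
from huggingface_hub import hf_hub_download

for filename in (
    "ControlNetModel/config.json",
    "ControlNetModel/diffusion_pytorch_model.safetensors",
    "ip-adapter.bin",
):
    local_path = hf_hub_download(
        repo_id="InstantX/InstantID", filename=filename, local_dir="./checkpoints"
    )
    assert Path(local_path).is_file()
    print(local_path)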
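
The only behavioural change in this commit, beyond the autopep8-style line wrapping, is swapping the SDXL base checkpoint from wangqixun/YamerMIX_v8 to SG161222/RealVisXL_V3.0. In isolation, the loading step looks roughly like the sketch below; the import path and the controlnet= / torch_dtype= keyword arguments are assumptions based on the upstream InstantID demo, since the diff truncates the from_pretrained(...) call.

import torch
from diffusers.models import ControlNetModel

# StableDiffusionXLInstantIDPipeline ships with the InstantID code base
# (pipeline_stable_diffusion_xl_instantid.py), not with diffusers itself;
# the module name is assumed from the upstream repository.
from pipeline_stable_diffusion_xl_instantid import StableDiffusionXLInstantIDPipeline

# Identity-preserving ControlNet downloaded above.
controlnet = ControlNetModel.from_pretrained(
    "./checkpoints/ControlNetModel", torch_dtype=torch.float16
)

# Any SDXL checkpoint on the Hub can be plugged in here; this commit moves
# from "wangqixun/YamerMIX_v8" to "SG161222/RealVisXL_V3.0".
base_model_path = "SG161222/RealVisXL_V3.0"

# The keyword arguments below mirror the upstream InstantID example; the
# actual call in app.py may pass extra arguments not shown in the diff.
pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
    base_model_path,
    controlnet=controlnet,
    torch_dtype=torch.float16,
)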
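
The re-wrapped lines in resize_img are the usual floor-to-a-multiple trick: after rescaling, width and height are snapped down to the nearest multiple of base_pixel_number so the final resolution divides evenly through the model's downsampling stages. A worked example of that arithmetic; floor_to_multiple is a hypothetical helper, and 64 is illustrative only, since the real default lives in the resize_img signature outside this diff.

def floor_to_multiple(value: int, base: int) -> int:
    # Same arithmetic as w_resize_new / h_resize_new in resize_img:
    # integer-divide, then multiply, i.e. round down to a multiple of `base`.
    return (value // base) * base

# Illustrative value only; base_pixel_number's actual default is set in
# the resize_img signature, which this diff does not show.
base_pixel_number = 64
print(floor_to_multiple(1000, base_pixel_number))  # 960
print(floor_to_multiple(1024, base_pixel_number))  # 1024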
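
One context line worth a second look while touching this file: the sorted(...) key in generate_image, (x["bbox"][2] - x["bbox"][0]) * x["bbox"][3] - x["bbox"][1], evaluates as width * y2 - y1 because multiplication binds tighter than subtraction, so it is not quite the bounding-box area the "maximum face" comment suggests. A sketch of an explicitly parenthesized equivalent; pick_largest_face and bbox_area are hypothetical names, not part of app.py.

from typing import Any

def pick_largest_face(face_info: list[dict[str, Any]]) -> dict[str, Any]:
    # Stand-in for the sorted(...)[-1] idiom in generate_image, with the
    # height term fully parenthesized so the key really is the bbox area.
    def bbox_area(face: dict[str, Any]) -> float:
        x1, y1, x2, y2 = face["bbox"]
        return (x2 - x1) * (y2 - y1)

    return max(face_info, key=bbox_area)

# Usage inside generate_image would then read:
#     largest = pick_largest_face(face_info)
#     face_emb = largest["embedding"]
#     face_kps = largest["kps"]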
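
Finally, the usage_tips component illustrates the Gradio show/hide pattern the app relies on: the Markdown starts with visible=False and the click handlers toggle it by returning gr.update(visible=...), as the gr.update(visible=True) return in generate_image shows. A self-contained sketch of that wiring; hide_tips, fake_generate, and the component names are illustrative, not the ones in app.py.

import gradio as gr

def hide_tips():
    # Mirrors the role of remove_tips in app.py: hide the tips when a run starts.
    return gr.update(visible=False)

def fake_generate(prompt):
    # Stand-in for generate_image: return a result plus an update that
    # re-shows the tips, matching the gr.update(visible=True) in the diff.
    return f"generated: {prompt}", gr.update(visible=True)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    submit = gr.Button("Submit", variant="primary")
    output = gr.Textbox(label="Output")
    tips = gr.Markdown("Usage tips", visible=False)

    submit.click(fn=hide_tips, outputs=tips).then(
        fn=fake_generate, inputs=prompt, outputs=[output, tips]
    )

if __name__ == "__main__":
    demo.launch()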