multimodalart (HF staff) committed
Commit 1287e5e
1 Parent(s): 0e80ee6

Update app.py

Files changed (1)
  1. app.py +3 -1
app.py CHANGED
@@ -49,18 +49,20 @@ def generate_image(images, prompt, negative_prompt, preserve_face_structure, pro
         faces = app.get(face)
         faceid_embed = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
         faceid_all_embeds.append(faceid_embed)
-        if(first_iteration):
+        if(first_iteration and preserve_face_structure):
             face_image = face_align.norm_crop(face, landmark=faces[0].kps, image_size=224) # you can also segment the face
             first_iteration = False
 
     average_embedding = torch.mean(torch.stack(faceid_all_embeds, dim=0), dim=0)
 
     if(not preserve_face_structure):
+        print("Generating normal")
         image = ip_model.generate(
             prompt=prompt, negative_prompt=negative_prompt, faceid_embeds=average_embedding,
             width=512, height=512, num_inference_steps=30
         )
     else:
+        print("Generating plus")
         image = ip_model_plus.generate(
             prompt=prompt, negative_prompt=negative_prompt, faceid_embeds=average_embedding,
             face_image=face_image, shortcut=True, s_scale=1.5, width=512, height=512, num_inference_steps=30
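
What the patch changes: face_align.norm_crop is now run only when preserve_face_structure is set, since face_image is consumed solely by the FaceID Plus branch (the plain FaceID branch needs only the averaged identity embedding), and each branch now prints which model it is using. Below is a minimal runnable sketch of the averaging step both branches share; the 512-dimensional size is an assumption about insightface's normed_embedding, and random tensors stand in for real face embeddings:

import torch

# Stand-ins for the per-photo embeddings built in app.py as
# torch.from_numpy(faces[0].normed_embedding).unsqueeze(0); assumed shape (1, 512).
faceid_all_embeds = [torch.randn(1, 512) for _ in range(3)]

# Same reduction as app.py: stack to (N, 1, 512) and average over the N
# reference photos, yielding one identity embedding for the IP-Adapter.
average_embedding = torch.mean(torch.stack(faceid_all_embeds, dim=0), dim=0)

assert average_embedding.shape == (1, 512)

After the patch, face_image is defined only when preserve_face_structure is true; that is safe because the else: branch is the sole consumer of face_image and is unreachable otherwise.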