feishen29 committed on
Commit
50d57c0
·
verified ·
1 Parent(s): fbe9d1a

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -15
app.py CHANGED
@@ -46,9 +46,9 @@ parser.add_argument('--if_resampler', type=bool, default=True)
46
  parser.add_argument('--if_ipa', type=bool, default=True)
47
  parser.add_argument('--if_control', type=bool, default=True)
48
 
49
- parser.add_argument('--pretrained_model_name_or_path',
50
- default="./ckpt/Realistic_Vision_V4.0_noVAE",
51
- type=str)
52
  parser.add_argument('--ip_ckpt',
53
  default="./ckpt/ip-adapter-faceid-plus_sd15.bin",
54
  type=str)
@@ -86,7 +86,7 @@ image_encoder = CLIPVisionModelWithProjection.from_pretrained(args.pretrained_im
86
  unet = UNet2DConditionModel.from_pretrained("./ckpt/unet").to(
87
  dtype=torch.float16,device=args.device)
88
 
89
- image_face_fusion = pipeline('face_fusion_torch', model='damo/cv_unet_face_fusion_torch', model_revision='v1.0.3')
90
 
91
  #face_model
92
  app = FaceAnalysis(providers=[('CUDAExecutionProvider', {"device_id": args.device})]) ##使用GPU:0, 默认使用buffalo_l就可以了
@@ -186,7 +186,7 @@ img_transform = transforms.Compose([
186
  transforms.Normalize([0.5], [0.5]),
187
  ])
188
 
189
- openpose_model = OpenposeDetector.from_pretrained("/home/sf/ControlNet").to(args.device)
190
 
191
  def resize_img(input_image, max_side=640, min_side=512, size=None,
192
  pad_to_max_side=False, mode=Image.BILINEAR, base_pixel_number=64):
@@ -282,15 +282,15 @@ def tryon_process(garm_img, face_img, pose_img, prompt, cloth_guidance_scale, ca
282
  num_inference_steps=denoise_steps,
283
  ).images
284
 
285
- if if_post and if_ipa:
286
- # 将 PIL 图像转换为 NumPy 数组
287
- output_array = np.array(output[0])
288
- # 将 RGB 图像转换为 BGR 图像
289
- bgr_array = cv2.cvtColor(output_array, cv2.COLOR_RGB2BGR)
290
- # 将 NumPy 数组转换为 PIL 图像
291
- bgr_image = Image.fromarray(bgr_array)
292
- result = image_face_fusion(dict(template=bgr_image, user=Image.fromarray(face_image.astype('uint8'))))
293
- return result[OutputKeys.OUTPUT_IMG]
294
  return output[0]
295
 
296
  example_path = os.path.dirname(__file__)
@@ -396,4 +396,4 @@ with image_blocks as demo:
396
  try_button.click(fn=tryon_process, inputs=[garm_img, imgs, pose_img, prompt, cloth_guidance_scale, caption_guidance_scale, face_guidance_scale,self_guidance_scale, cross_guidance_scale, is_checked_face, is_checked_postprocess, is_checked_pose, denoise_steps, seed],
397
  outputs=[image_out], api_name='tryon')
398
 
399
- image_blocks.launch(server_port=20021) # 指定固定端口
 
46
  parser.add_argument('--if_ipa', type=bool, default=True)
47
  parser.add_argument('--if_control', type=bool, default=True)
48
 
49
+ # parser.add_argument('--pretrained_model_name_or_path',
50
+ # default="./ckpt/Realistic_Vision_V4.0_noVAE",
51
+ # type=str)
52
  parser.add_argument('--ip_ckpt',
53
  default="./ckpt/ip-adapter-faceid-plus_sd15.bin",
54
  type=str)
 
86
  unet = UNet2DConditionModel.from_pretrained("./ckpt/unet").to(
87
  dtype=torch.float16,device=args.device)
88
 
89
+ # image_face_fusion = pipeline('face_fusion_torch', model='damo/cv_unet_face_fusion_torch', model_revision='v1.0.3')
90
 
91
  #face_model
92
  app = FaceAnalysis(providers=[('CUDAExecutionProvider', {"device_id": args.device})]) ##使用GPU:0, 默认使用buffalo_l就可以了
 
186
  transforms.Normalize([0.5], [0.5]),
187
  ])
188
 
189
+ openpose_model = OpenposeDetector.from_pretrained("./ckpt/ControlNet").to(args.device)
190
 
191
  def resize_img(input_image, max_side=640, min_side=512, size=None,
192
  pad_to_max_side=False, mode=Image.BILINEAR, base_pixel_number=64):
 
282
  num_inference_steps=denoise_steps,
283
  ).images
284
 
285
+ # if if_post and if_ipa:
286
+ # # 将 PIL 图像转换为 NumPy 数组
287
+ # output_array = np.array(output[0])
288
+ # # 将 RGB 图像转换为 BGR 图像
289
+ # bgr_array = cv2.cvtColor(output_array, cv2.COLOR_RGB2BGR)
290
+ # # 将 NumPy 数组转换为 PIL 图像
291
+ # bgr_image = Image.fromarray(bgr_array)
292
+ # result = image_face_fusion(dict(template=bgr_image, user=Image.fromarray(face_image.astype('uint8'))))
293
+ # return result[OutputKeys.OUTPUT_IMG]
294
  return output[0]
295
 
296
  example_path = os.path.dirname(__file__)
 
396
  try_button.click(fn=tryon_process, inputs=[garm_img, imgs, pose_img, prompt, cloth_guidance_scale, caption_guidance_scale, face_guidance_scale,self_guidance_scale, cross_guidance_scale, is_checked_face, is_checked_postprocess, is_checked_pose, denoise_steps, seed],
397
  outputs=[image_out], api_name='tryon')
398
 
399
+ image_blocks.launch()