jhj0517 committed on
Commit 1f6f578
1 Parent(s): de2727e

add `init_model()` to pose alignment

Files changed (1)
  1. pose_align.py +15 -12
pose_align.py CHANGED
@@ -71,18 +71,10 @@ class PoseAlignmentInference:
         H_out, W_out = size_calculate(H_in,W_in, detect_resolution)
         H_out, W_out = size_calculate(H_out,W_out, image_resolution)
 
-        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-        self.detector = DWposeDetector(
-            det_config = self.config_paths["det_config"],
-            det_ckpt = self.model_paths["det_ckpt"],
-            pose_config = self.config_paths["pose_config"],
-            pose_ckpt = self.model_paths["pose_ckpt"],
-            keypoints_only=False
-        )
-        detector = self.detector.to(device)
+        self.init_model()
 
         refer_img = cv2.imread(imgfn_refer)
-        output_refer, pose_refer = detector(refer_img,detect_resolution=detect_resolution, image_resolution=image_resolution, output_type='cv2',return_pose_dict=True)
+        output_refer, pose_refer = self.detector(refer_img,detect_resolution=detect_resolution, image_resolution=image_resolution, output_type='cv2',return_pose_dict=True)
         body_ref_img = pose_refer['bodies']['candidate']
         hands_ref_img = pose_refer['hands']
         faces_ref_img = pose_refer['faces']
@@ -119,7 +111,7 @@ class PoseAlignmentInference:
 
             # estimate scale parameters by the 1st frame in the video
             if i==skip_frames:
-                output_1st_img, pose_1st_img = detector(img, detect_resolution, image_resolution, output_type='cv2', return_pose_dict=True)
+                output_1st_img, pose_1st_img = self.detector(img, detect_resolution, image_resolution, output_type='cv2', return_pose_dict=True)
                 body_1st_img = pose_1st_img['bodies']['candidate']
                 hands_1st_img = pose_1st_img['hands']
                 faces_1st_img = pose_1st_img['faces']
@@ -246,7 +238,7 @@ class PoseAlignmentInference:
 
 
             # pose align
-            pose_img, pose_ori = detector(img, detect_resolution, image_resolution, output_type='cv2', return_pose_dict=True)
+            pose_img, pose_ori = self.detector(img, detect_resolution, image_resolution, output_type='cv2', return_pose_dict=True)
             video_pose_buffer.append(pose_img)
             pose_align = self.align_img(img, pose_ori, align_args, detect_resolution, image_resolution)
 
@@ -321,6 +313,17 @@ class PoseAlignmentInference:
         self.release_vram()
         return outfn_align_pose_video, outfn
 
+    def init_model(self):
+        if self.detector is None:
+            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+            self.detector = DWposeDetector(
+                det_config=self.config_paths["det_config"],
+                det_ckpt=self.model_paths["det_ckpt"],
+                pose_config=self.config_paths["pose_config"],
+                pose_ckpt=self.model_paths["pose_ckpt"],
+                keypoints_only=False
+            ).to(device)
+
     def release_vram(self):
         if self.detector is not None:
             del self.detector
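For context on what the change does: constructing `DWposeDetector` moves out of the alignment routine into `init_model()`, which only builds the detector while `self.detector` is still `None`, and `release_vram()` drops it again once alignment is done. Below is a minimal, self-contained sketch of that lazy-init/release pattern; `LazyDetectorHolder`, its `run()` method and the `torch.nn.Linear` placeholder are hypothetical stand-ins for illustration, not code from pose_align.py.

```python
import gc
import torch


class LazyDetectorHolder:
    """Sketch of the lazy-init / release pattern; this class and run() are
    hypothetical, and nn.Linear stands in for the real DWposeDetector."""

    def __init__(self):
        # Nothing heavy is loaded here; the model is built on first use.
        self.detector = None

    def init_model(self):
        # Build the model only once; later calls are no-ops.
        if self.detector is None:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            self.detector = torch.nn.Linear(4, 2).to(device)  # placeholder model

    def run(self, x):
        self.init_model()  # safe to call at the top of every entry point
        device = next(self.detector.parameters()).device
        return self.detector(x.to(device))

    def release_vram(self):
        # Drop the model so the GPU memory it held can be reclaimed.
        if self.detector is not None:
            del self.detector
            self.detector = None
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()


holder = LazyDetectorHolder()
print(holder.run(torch.randn(1, 4)))  # the detector is created here, on first use
holder.release_vram()                 # and freed again afterwards
```

The `if self.detector is None` guard makes `init_model()` safe to call repeatedly, and keeping construction out of `__init__` means nothing is loaded until a run actually needs the detector.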