acmyu committed on
Commit
ebe2739
·
1 Parent(s): 3dcc757

change pose colors

Browse files
Files changed (2) hide show
  1. libs/easy_dwpose/draw/openpose.py +25 -12
  2. main.py +8 -0
libs/easy_dwpose/draw/openpose.py CHANGED
@@ -36,28 +36,41 @@ def draw_bodypose(canvas, candidate, subset):
36
  ]
37
 
38
  colors = [
39
- [255, 0, 0],
40
- [255, 85, 0],
41
- [255, 170, 0],
42
- [255, 255, 0],
43
  [170, 255, 0],
44
- [85, 255, 0],
 
 
 
 
45
  [0, 255, 0],
46
- [0, 255, 85],
47
- [0, 255, 170],
48
- [0, 255, 255],
49
- [0, 170, 255],
 
 
 
 
50
  [0, 85, 255],
51
  [0, 0, 255],
52
  [85, 0, 255],
 
53
  [170, 0, 255],
 
54
  [255, 0, 255],
55
- [255, 0, 170],
56
- [255, 0, 85],
57
  ]
58
 
 
 
59
  for i in range(17):
60
  for n in range(len(subset)):
 
 
61
  index = subset[n][np.array(limbSeq[i]) - 1]
62
  if -1 in index:
63
  continue
@@ -68,7 +81,7 @@ def draw_bodypose(canvas, candidate, subset):
68
  length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
69
  angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
70
  polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
71
- cv2.fillConvexPoly(canvas, polygon, colors[i])
72
 
73
  canvas = (canvas * 0.6).astype(np.uint8)
74
 
 
36
  ]
37
 
38
  colors = [
39
+ [0, 255, 255],
40
+ [255, 255, 255],
41
+
42
+ # arm-r
43
  [170, 255, 0],
44
+ [255, 255, 0],
45
+ [255, 170, 0],
46
+
47
+ # arm-l
48
+ [0, 85, 0],
49
  [0, 255, 0],
50
+ [0, 170, 0],
51
+
52
+ #leg-r
53
+ [255, 85, 0],
54
+ [255, 0, 0],
55
+ [255, 0, 85],
56
+
57
+ # leg-l
58
  [0, 85, 255],
59
  [0, 0, 255],
60
  [85, 0, 255],
61
+
62
  [170, 0, 255],
63
+ [0, 255, 170],
64
  [255, 0, 255],
65
+ [0, 170, 255],
 
66
  ]
67
 
68
+
69
+
70
  for i in range(17):
71
  for n in range(len(subset)):
72
+ j = np.array(limbSeq[i]) - 1
73
+ #print(i, j)
74
  index = subset[n][np.array(limbSeq[i]) - 1]
75
  if -1 in index:
76
  continue
 
81
  length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
82
  angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
83
  polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
84
+ cv2.fillConvexPoly(canvas, polygon, colors[j[1]])
85
 
86
  canvas = (canvas * 0.6).astype(np.uint8)
87
 
main.py CHANGED
@@ -353,6 +353,7 @@ def prepare_inputs_inference(in_img, in_vid, frames, fps, dwpose, rembg_session,
353
  tpose.save("out/"+"tar_pose"+str(len(target_poses_cropped))+".png")
354
  target_poses_cropped.append(tpose)
355
 
 
356
  return in_img, target_poses_cropped, in_pose, target_poses_coords, frames
357
 
358
 
@@ -1128,6 +1129,9 @@ def run_train_impl(images, train_steps=100, modelId="fine_tuned_pcdms", bg_remov
1128
 
1129
  train(modelId, in_img, in_pose, train_imgs, train_poses, train_steps, pcdms_model, noise_scheduler, image_encoder_p, image_encoder_g, vae, unet, finetune, is_app)
1130
 
 
 
 
1131
  def run_train(images, train_steps=100, modelId="fine_tuned_pcdms", bg_remove=True, resize_inputs=True):
1132
  run_train_impl(images, train_steps, modelId, bg_remove, resize_inputs)
1133
 
@@ -1171,6 +1175,8 @@ def run_inference_impl(images, video_path, frames, train_steps=100, inference_st
1171
  #results = [img_pad(img, img_width, img_height, True) for img in results]
1172
 
1173
  print("Done!")
 
 
1174
 
1175
  return out_vid+'.webm', results, getThumbnails(results), target_poses_coords, getThumbnails(orig_frames)
1176
 
@@ -1205,6 +1211,8 @@ def generate_frame(images, target_poses, train_steps=100, inference_steps=10, mo
1205
  #results = [img_pad(img, img_width, img_height, True) for img in results]
1206
 
1207
  print("Done!")
 
 
1208
 
1209
  results[0].save('result.png')
1210
 
 
353
  tpose.save("out/"+"tar_pose"+str(len(target_poses_cropped))+".png")
354
  target_poses_cropped.append(tpose)
355
 
356
+ target_poses_cropped[0].save("pose.png")
357
  return in_img, target_poses_cropped, in_pose, target_poses_coords, frames
358
 
359
 
 
1129
 
1130
  train(modelId, in_img, in_pose, train_imgs, train_poses, train_steps, pcdms_model, noise_scheduler, image_encoder_p, image_encoder_g, vae, unet, finetune, is_app)
1131
 
1132
+ gc.collect()
1133
+ torch.cuda.empty_cache()
1134
+
1135
  def run_train(images, train_steps=100, modelId="fine_tuned_pcdms", bg_remove=True, resize_inputs=True):
1136
  run_train_impl(images, train_steps, modelId, bg_remove, resize_inputs)
1137
 
 
1175
  #results = [img_pad(img, img_width, img_height, True) for img in results]
1176
 
1177
  print("Done!")
1178
+ gc.collect()
1179
+ torch.cuda.empty_cache()
1180
 
1181
  return out_vid+'.webm', results, getThumbnails(results), target_poses_coords, getThumbnails(orig_frames)
1182
 
 
1211
  #results = [img_pad(img, img_width, img_height, True) for img in results]
1212
 
1213
  print("Done!")
1214
+ gc.collect()
1215
+ torch.cuda.empty_cache()
1216
 
1217
  results[0].save('result.png')
1218