jonigata committed
Commit ec80d0f
1 Parent(s): 4521893

add body estimation

Files changed (6):
  1. app.py +47 -8
  2. requirements.txt +8 -0
  3. src/body.py +218 -0
  4. src/model.py +219 -0
  5. src/util.py +198 -0
  6. static/poseEditor.js +8 -4
app.py CHANGED
@@ -1,5 +1,21 @@
 import gradio as gr
+import numpy as np
+import cv2
 from fastapi import FastAPI, Request, Response
+from src.body import Body
+
+body_estimation = Body('model/body_pose_model.pth')
+
+def pil2cv(image):
+    ''' PIL image -> OpenCV image '''
+    new_image = np.array(image, dtype=np.uint8)
+    if new_image.ndim == 2:  # grayscale
+        pass
+    elif new_image.shape[2] == 3:  # color
+        new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR)
+    elif new_image.shape[2] == 4:  # with alpha channel
+        new_image = cv2.cvtColor(new_image, cv2.COLOR_RGBA2BGRA)
+    return new_image
 
 with open("static/poseEditor.js", "r") as f:
     file_contents = f.read()
@@ -35,11 +51,32 @@ async def some_fastapi_middleware(request: Request, call_next):
 
     return response
 
-def greet(source):
-    print("greet")
-    print(source)
-    return ("Hello ", 768, 768)
+# convert candidate to a JSON string
+def candidate_to_json_string(arr):
+    a = [f'[{x:.2f}, {y:.2f}]' for x, y, *_ in arr]
+    return '[' + ', '.join(a) + ']'
+
+# convert subset to a JSON string
+def subset_to_json_string(arr):
+    arr_str = ','.join(['[' + ','.join([f'{num:.2f}' for num in row]) + ']' for row in arr])
+    return '[' + arr_str + ']'
+
+def estimate_body(source):
+    print("estimate_body")
+    if source is None:
+        return None
+
+    candidate, subset = body_estimation(pil2cv(source))
+    print(candidate_to_json_string(candidate))
+    print(subset_to_json_string(subset))
+    return "{ \"candidate\": " + candidate_to_json_string(candidate) + ", \"subset\": " + subset_to_json_string(subset) + " }"
+
+def image_changed(image):
+    if image is None:
+        return None
+    json = estimate_body(image)
+    return json, image.width, image.height
+
 html_text = f"""
 <canvas id="canvas" width="512" height="512"></canvas>
 <script type="text/javascript" defer>{file_contents}</script>
@@ -52,19 +89,21 @@ with gr.Blocks() as demo:
             width = gr.Slider(label="Width", minimum=512, maximum=1024, step=64, value=512, key="Width", interactive=True)
             height = gr.Slider(label="Height", minimum=512, maximum=1024, step=64, value=512, key="Height", interactive=True)
             startBtn = gr.Button(value="Start edit")
+            json = gr.JSON(label="Body")
         with gr.Column(scale=2):
             gr.HTML("<ul><li>ctrl + drag to scale</li><li>alt + drag to translate</li><li>shift + drag to rotate (move right first, then up or down)</li></ul>")
             html = gr.HTML(html_text)
             saveBtn = gr.Button(value="Save")
 
     source.change(
-        fn=lambda x: x.size,
+        fn = image_changed,
         inputs = [source],
-        outputs = [width, height])
+        outputs = [json, width, height])
    startBtn.click(
         fn = None,
-        inputs = [width, height], outputs = [],
-        _js="(w, h) => { initializePose(w,h); return []; }")
+        inputs = [json, width, height],
+        outputs = [],
+        _js="(json, w, h) => { initializePose(json, w, h); return []; }")
    saveBtn.click(
         fn = None,
         inputs = [], outputs = [],
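
For reference, a minimal sketch of the payload estimate_body builds and startBtn.click forwards to initializePose. This is not part of the commit; the coordinates are made up, and the shapes follow the candidate/subset comments in src/body.py below:

import json

# Hypothetical payload with two keypoints belonging to one person.
# candidate rows are [x, y]; each subset row holds 18 candidate indices
# (-1 = part not detected), then the total score and the part count.
payload = ('{ "candidate": [[235.00, 158.00], [234.00, 220.00]], '
           '"subset": [[0, 1, -1, -1, -1, -1, -1, -1, -1, -1, '
           '-1, -1, -1, -1, -1, -1, -1, -1, 1.86, 2]] }')

data = json.loads(payload)
assert len(data["subset"][0]) == 20
print(data["candidate"][0])  # [235.0, 158.0]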
requirements.txt ADDED
@@ -0,0 +1,8 @@
+fastapi==0.92.0
+gradio==3.18.0
+matplotlib==3.7.0
+numpy==1.24.2
+opencv_python==4.7.0.68
+scipy==1.10.0
+torch==1.13.1
+torchvision==0.14.1
src/body.py ADDED
@@ -0,0 +1,218 @@
+import cv2
+import numpy as np
+import math
+from scipy.ndimage import gaussian_filter
+import matplotlib.pyplot as plt
+import torch
+
+from src import util
+from src.model import bodypose_model
+
+class Body(object):
+    def __init__(self, model_path):
+        self.model = bodypose_model()
+        if torch.cuda.is_available():
+            self.model = self.model.cuda()
+        model_dict = util.transfer(self.model, torch.load(model_path))
+        self.model.load_state_dict(model_dict)
+        self.model.eval()
+
+    def __call__(self, oriImg):
+        # scale_search = [0.5, 1.0, 1.5, 2.0]
+        scale_search = [0.5]
+        boxsize = 368
+        stride = 8
+        padValue = 128
+        thre1 = 0.1
+        thre2 = 0.05
+        multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
+        heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
+        paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
+
+        for m in range(len(multiplier)):
+            scale = multiplier[m]
+            imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
+            imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
+            im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
+            im = np.ascontiguousarray(im)
+
+            data = torch.from_numpy(im).float()
+            if torch.cuda.is_available():
+                data = data.cuda()
+            # data = data.permute([2, 0, 1]).unsqueeze(0).float()
+            with torch.no_grad():
+                Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
+            Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
+            Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
+
+            # extract outputs, resize, and remove padding
+            # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0))  # output 1 is heatmaps
+            heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0))  # output 1 is heatmaps
+            heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
+            heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+            heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
+
+            # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0))  # output 0 is PAFs
+            paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0))  # output 0 is PAFs
+            paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
+            paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+            paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
+
+            # accumulate the average over scales
+            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
+            paf_avg = paf_avg + paf / len(multiplier)
+
+        all_peaks = []
+        peak_counter = 0
+
+        for part in range(18):
+            map_ori = heatmap_avg[:, :, part]
+            one_heatmap = gaussian_filter(map_ori, sigma=3)
+
+            map_left = np.zeros(one_heatmap.shape)
+            map_left[1:, :] = one_heatmap[:-1, :]
+            map_right = np.zeros(one_heatmap.shape)
+            map_right[:-1, :] = one_heatmap[1:, :]
+            map_up = np.zeros(one_heatmap.shape)
+            map_up[:, 1:] = one_heatmap[:, :-1]
+            map_down = np.zeros(one_heatmap.shape)
+            map_down[:, :-1] = one_heatmap[:, 1:]
+
+            peaks_binary = np.logical_and.reduce(
+                (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
+            peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
+            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
+            peak_id = range(peak_counter, peak_counter + len(peaks))
+            peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
+
+            all_peaks.append(peaks_with_score_and_id)
+            peak_counter += len(peaks)
+
+        # find connections in the specified sequence, center 29 is in position 15
+        limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
+                   [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
+                   [1, 16], [16, 18], [3, 17], [6, 18]]
+        # the middle joints heatmap correspondence
+        mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22],
+                  [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52],
+                  [55, 56], [37, 38], [45, 46]]
+
+        connection_all = []
+        special_k = []
+        mid_num = 10
+
+        for k in range(len(mapIdx)):
+            score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
+            candA = all_peaks[limbSeq[k][0] - 1]
+            candB = all_peaks[limbSeq[k][1] - 1]
+            nA = len(candA)
+            nB = len(candB)
+            indexA, indexB = limbSeq[k]
+            if (nA != 0 and nB != 0):
+                connection_candidate = []
+                for i in range(nA):
+                    for j in range(nB):
+                        vec = np.subtract(candB[j][:2], candA[i][:2])
+                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
+                        norm = max(0.001, norm)
+                        vec = np.divide(vec, norm)
+
+                        startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
+                                            np.linspace(candA[i][1], candB[j][1], num=mid_num)))
+
+                        vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0]
+                                          for I in range(len(startend))])
+                        vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1]
+                                          for I in range(len(startend))])
+
+                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
+                        score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
+                            0.5 * oriImg.shape[0] / norm - 1, 0)
+                        criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
+                        criterion2 = score_with_dist_prior > 0
+                        if criterion1 and criterion2:
+                            connection_candidate.append(
+                                [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
+
+                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
+                connection = np.zeros((0, 5))
+                for c in range(len(connection_candidate)):
+                    i, j, s = connection_candidate[c][0:3]
+                    if (i not in connection[:, 3] and j not in connection[:, 4]):
+                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
+                        if (len(connection) >= min(nA, nB)):
+                            break
+
+                connection_all.append(connection)
+            else:
+                special_k.append(k)
+                connection_all.append([])
+
+        # last number in each row is the total parts number of that person
+        # the second last number in each row is the score of the overall configuration
+        subset = -1 * np.ones((0, 20))
+        candidate = np.array([item for sublist in all_peaks for item in sublist])
+
+        for k in range(len(mapIdx)):
+            if k not in special_k:
+                partAs = connection_all[k][:, 0]
+                partBs = connection_all[k][:, 1]
+                indexA, indexB = np.array(limbSeq[k]) - 1
+
+                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
+                    found = 0
+                    subset_idx = [-1, -1]
+                    for j in range(len(subset)):  # 1:size(subset,1):
+                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
+                            subset_idx[found] = j
+                            found += 1
+
+                    if found == 1:
+                        j = subset_idx[0]
+                        if subset[j][indexB] != partBs[i]:
+                            subset[j][indexB] = partBs[i]
+                            subset[j][-1] += 1
+                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
+                    elif found == 2:  # if found 2 and disjoint, merge them
+                        j1, j2 = subset_idx
+                        membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
+                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
+                            subset[j1][:-2] += (subset[j2][:-2] + 1)
+                            subset[j1][-2:] += subset[j2][-2:]
+                            subset[j1][-2] += connection_all[k][i][2]
+                            subset = np.delete(subset, j2, 0)
+                        else:  # as with found == 1
+                            subset[j1][indexB] = partBs[i]
+                            subset[j1][-1] += 1
+                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
+
+                    # if no partA is found in the subset, create a new subset
+                    elif not found and k < 17:
+                        row = -1 * np.ones(20)
+                        row[indexA] = partAs[i]
+                        row[indexB] = partBs[i]
+                        row[-1] = 2
+                        row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
+                        subset = np.vstack([subset, row])
+        # delete subset rows that have too few detected parts
+        deleteIdx = []
+        for i in range(len(subset)):
+            if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
+                deleteIdx.append(i)
+        subset = np.delete(subset, deleteIdx, axis=0)
+
+        # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
+        # candidate: x, y, score, id
+        return candidate, subset
+
+if __name__ == "__main__":
+    body_estimation = Body('../model/body_pose_model.pth')
+
+    test_image = '../images/ski.jpg'
+    oriImg = cv2.imread(test_image)  # B,G,R order
+    candidate, subset = body_estimation(oriImg)
+    canvas = util.draw_bodypose(oriImg, candidate, subset)
+    plt.imshow(canvas[:, :, [2, 1, 0]])
+    plt.show()
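
As a hedged illustration of the return format described in the closing comments (a sketch, not part of the commit; people_from_pose is a hypothetical helper name):

# Sketch: turn (candidate, subset) into a list of per-person keypoint dicts.
# candidate rows are (x, y, score, id); subset rows hold 18 candidate
# indices (or -1), then the total score and the number of detected parts.
def people_from_pose(candidate, subset):
    people = []
    for person in subset:
        keypoints = {}
        for part in range(18):
            idx = int(person[part])
            if idx == -1:
                continue  # this body part was not detected
            x, y, score = candidate[idx][0:3]
            keypoints[part] = (float(x), float(y), float(score))
        people.append(keypoints)
    return people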
src/model.py ADDED
@@ -0,0 +1,219 @@
+from collections import OrderedDict
+
+import torch
+import torch.nn as nn
+
+def make_layers(block, no_relu_layers):
+    layers = []
+    for layer_name, v in block.items():
+        if 'pool' in layer_name:
+            layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
+                                 padding=v[2])
+            layers.append((layer_name, layer))
+        else:
+            conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
+                               kernel_size=v[2], stride=v[3],
+                               padding=v[4])
+            layers.append((layer_name, conv2d))
+            if layer_name not in no_relu_layers:
+                layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))
+
+    return nn.Sequential(OrderedDict(layers))
+
+class bodypose_model(nn.Module):
+    def __init__(self):
+        super(bodypose_model, self).__init__()
+
+        # these layers have no relu layer
+        no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',
+                          'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',
+                          'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',
+                          'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L2']
+        blocks = {}
+        block0 = OrderedDict([
+            ('conv1_1', [3, 64, 3, 1, 1]),
+            ('conv1_2', [64, 64, 3, 1, 1]),
+            ('pool1_stage1', [2, 2, 0]),
+            ('conv2_1', [64, 128, 3, 1, 1]),
+            ('conv2_2', [128, 128, 3, 1, 1]),
+            ('pool2_stage1', [2, 2, 0]),
+            ('conv3_1', [128, 256, 3, 1, 1]),
+            ('conv3_2', [256, 256, 3, 1, 1]),
+            ('conv3_3', [256, 256, 3, 1, 1]),
+            ('conv3_4', [256, 256, 3, 1, 1]),
+            ('pool3_stage1', [2, 2, 0]),
+            ('conv4_1', [256, 512, 3, 1, 1]),
+            ('conv4_2', [512, 512, 3, 1, 1]),
+            ('conv4_3_CPM', [512, 256, 3, 1, 1]),
+            ('conv4_4_CPM', [256, 128, 3, 1, 1])
+        ])
+
+        # Stage 1
+        block1_1 = OrderedDict([
+            ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
+            ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
+            ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
+            ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
+            ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])
+        ])
+
+        block1_2 = OrderedDict([
+            ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
+            ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
+            ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
+            ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
+            ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])
+        ])
+        blocks['block1_1'] = block1_1
+        blocks['block1_2'] = block1_2
+
+        self.model0 = make_layers(block0, no_relu_layers)
+
+        # Stages 2 - 6
+        for i in range(2, 7):
+            blocks['block%d_1' % i] = OrderedDict([
+                ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
+                ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+                ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+                ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+                ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+                ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
+                ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
+            ])
+
+            blocks['block%d_2' % i] = OrderedDict([
+                ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
+                ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+                ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+                ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+                ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+                ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
+                ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
+            ])
+
+        for k in blocks.keys():
+            blocks[k] = make_layers(blocks[k], no_relu_layers)
+
+        self.model1_1 = blocks['block1_1']
+        self.model2_1 = blocks['block2_1']
+        self.model3_1 = blocks['block3_1']
+        self.model4_1 = blocks['block4_1']
+        self.model5_1 = blocks['block5_1']
+        self.model6_1 = blocks['block6_1']
+
+        self.model1_2 = blocks['block1_2']
+        self.model2_2 = blocks['block2_2']
+        self.model3_2 = blocks['block3_2']
+        self.model4_2 = blocks['block4_2']
+        self.model5_2 = blocks['block5_2']
+        self.model6_2 = blocks['block6_2']
+
+    def forward(self, x):
+        out1 = self.model0(x)
+
+        out1_1 = self.model1_1(out1)
+        out1_2 = self.model1_2(out1)
+        out2 = torch.cat([out1_1, out1_2, out1], 1)
+
+        out2_1 = self.model2_1(out2)
+        out2_2 = self.model2_2(out2)
+        out3 = torch.cat([out2_1, out2_2, out1], 1)
+
+        out3_1 = self.model3_1(out3)
+        out3_2 = self.model3_2(out3)
+        out4 = torch.cat([out3_1, out3_2, out1], 1)
+
+        out4_1 = self.model4_1(out4)
+        out4_2 = self.model4_2(out4)
+        out5 = torch.cat([out4_1, out4_2, out1], 1)
+
+        out5_1 = self.model5_1(out5)
+        out5_2 = self.model5_2(out5)
+        out6 = torch.cat([out5_1, out5_2, out1], 1)
+
+        out6_1 = self.model6_1(out6)
+        out6_2 = self.model6_2(out6)
+
+        return out6_1, out6_2
+
+class handpose_model(nn.Module):
+    def __init__(self):
+        super(handpose_model, self).__init__()
+
+        # these layers have no relu layer
+        no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',
+                          'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
+        # stage 1
+        block1_0 = OrderedDict([
+            ('conv1_1', [3, 64, 3, 1, 1]),
+            ('conv1_2', [64, 64, 3, 1, 1]),
+            ('pool1_stage1', [2, 2, 0]),
+            ('conv2_1', [64, 128, 3, 1, 1]),
+            ('conv2_2', [128, 128, 3, 1, 1]),
+            ('pool2_stage1', [2, 2, 0]),
+            ('conv3_1', [128, 256, 3, 1, 1]),
+            ('conv3_2', [256, 256, 3, 1, 1]),
+            ('conv3_3', [256, 256, 3, 1, 1]),
+            ('conv3_4', [256, 256, 3, 1, 1]),
+            ('pool3_stage1', [2, 2, 0]),
+            ('conv4_1', [256, 512, 3, 1, 1]),
+            ('conv4_2', [512, 512, 3, 1, 1]),
+            ('conv4_3', [512, 512, 3, 1, 1]),
+            ('conv4_4', [512, 512, 3, 1, 1]),
+            ('conv5_1', [512, 512, 3, 1, 1]),
+            ('conv5_2', [512, 512, 3, 1, 1]),
+            ('conv5_3_CPM', [512, 128, 3, 1, 1])
+        ])
+
+        block1_1 = OrderedDict([
+            ('conv6_1_CPM', [128, 512, 1, 1, 0]),
+            ('conv6_2_CPM', [512, 22, 1, 1, 0])
+        ])
+
+        blocks = {}
+        blocks['block1_0'] = block1_0
+        blocks['block1_1'] = block1_1
+
+        # stages 2 - 6
+        for i in range(2, 7):
+            blocks['block%d' % i] = OrderedDict([
+                ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
+                ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
+                ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
+                ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
+                ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
+                ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
+                ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
+            ])
+
+        for k in blocks.keys():
+            blocks[k] = make_layers(blocks[k], no_relu_layers)
+
+        self.model1_0 = blocks['block1_0']
+        self.model1_1 = blocks['block1_1']
+        self.model2 = blocks['block2']
+        self.model3 = blocks['block3']
+        self.model4 = blocks['block4']
+        self.model5 = blocks['block5']
+        self.model6 = blocks['block6']
+
+    def forward(self, x):
+        out1_0 = self.model1_0(x)
+        out1_1 = self.model1_1(out1_0)
+        concat_stage2 = torch.cat([out1_1, out1_0], 1)
+        out_stage2 = self.model2(concat_stage2)
+        concat_stage3 = torch.cat([out_stage2, out1_0], 1)
+        out_stage3 = self.model3(concat_stage3)
+        concat_stage4 = torch.cat([out_stage3, out1_0], 1)
+        out_stage4 = self.model4(concat_stage4)
+        concat_stage5 = torch.cat([out_stage4, out1_0], 1)
+        out_stage5 = self.model5(concat_stage5)
+        concat_stage6 = torch.cat([out_stage5, out1_0], 1)
+        out_stage6 = self.model6(concat_stage6)
+        return out_stage6
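
A quick smoke test, offered as a sketch rather than part of the commit: the two branches of bodypose_model should emit 38 PAF channels and 19 heatmap channels at 1/8 of the input resolution, which is what the stride = 8 and the 38/19-channel accumulators in src/body.py assume:

import torch
from src.model import bodypose_model

model = bodypose_model().eval()  # untrained weights are fine for a shape check
x = torch.zeros(1, 3, 368, 368)  # dummy batch at boxsize 368
with torch.no_grad():
    pafs, heatmaps = model(x)    # (L1 branch, L2 branch)
print(pafs.shape)      # torch.Size([1, 38, 46, 46]); three stride-2 pools give 368 / 8 = 46
print(heatmaps.shape)  # torch.Size([1, 19, 46, 46])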
src/util.py ADDED
@@ -0,0 +1,198 @@
+import math
+
+import numpy as np
+import cv2
+import matplotlib
+import matplotlib.pyplot as plt
+from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+from matplotlib.figure import Figure
+
+
+def padRightDownCorner(img, stride, padValue):
+    h = img.shape[0]
+    w = img.shape[1]
+
+    pad = 4 * [None]
+    pad[0] = 0  # up
+    pad[1] = 0  # left
+    pad[2] = 0 if (h % stride == 0) else stride - (h % stride)  # down
+    pad[3] = 0 if (w % stride == 0) else stride - (w % stride)  # right
+
+    img_padded = img
+    pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
+    img_padded = np.concatenate((pad_up, img_padded), axis=0)
+    pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
+    img_padded = np.concatenate((pad_left, img_padded), axis=1)
+    pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
+    img_padded = np.concatenate((img_padded, pad_down), axis=0)
+    pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
+    img_padded = np.concatenate((img_padded, pad_right), axis=1)
+
+    return img_padded, pad
+
+# transfer the caffe model weights to pytorch, matching the layer names
+def transfer(model, model_weights):
+    transfered_model_weights = {}
+    for weights_name in model.state_dict().keys():
+        transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
+    return transfered_model_weights
+
+# draw the body keypoints and limbs
+def draw_bodypose(canvas, candidate, subset):
+    stickwidth = 4
+    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
+               [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
+               [1, 16], [16, 18], [3, 17], [6, 18]]
+
+    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
+              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
+              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
+    for i in range(18):
+        for n in range(len(subset)):
+            index = int(subset[n][i])
+            if index == -1:
+                continue
+            x, y = candidate[index][0:2]
+            cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
+    for i in range(17):
+        for n in range(len(subset)):
+            index = subset[n][np.array(limbSeq[i]) - 1]
+            if -1 in index:
+                continue
+            cur_canvas = canvas.copy()
+            Y = candidate[index.astype(int), 0]
+            X = candidate[index.astype(int), 1]
+            mX = np.mean(X)
+            mY = np.mean(Y)
+            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
+            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
+            polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
+            cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
+            canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
+    # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
+    # plt.imshow(canvas[:, :, [2, 1, 0]])
+    return canvas
+
+def draw_handpose(canvas, all_hand_peaks, show_number=False):
+    edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10],
+             [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
+    fig = Figure(figsize=plt.figaspect(canvas))
+
+    fig.subplots_adjust(0, 0, 1, 1)
+    fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
+    bg = FigureCanvas(fig)
+    ax = fig.subplots()
+    ax.axis('off')
+    ax.imshow(canvas)
+
+    width, height = ax.figure.get_size_inches() * ax.figure.get_dpi()
+
+    for peaks in all_hand_peaks:
+        for ie, e in enumerate(edges):
+            if np.sum(np.all(peaks[e], axis=1) == 0) == 0:
+                x1, y1 = peaks[e[0]]
+                x2, y2 = peaks[e[1]]
+                ax.plot([x1, x2], [y1, y2], color=matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]))
+
+        for i, keypoint in enumerate(peaks):
+            x, y = keypoint
+            ax.plot(x, y, 'r.')
+            if show_number:
+                ax.text(x, y, str(i))
+    bg.draw()
+    canvas = np.frombuffer(bg.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)
+    return canvas
+
+# the image drawn by opencv is not as good
+def draw_handpose_by_opencv(canvas, peaks, show_number=False):
+    edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10],
+             [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
+    # cv2.rectangle(canvas, (x, y), (x+w, y+w), (0, 255, 0), 2, lineType=cv2.LINE_AA)
+    # cv2.putText(canvas, 'left' if is_left else 'right', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+    for ie, e in enumerate(edges):
+        if np.sum(np.all(peaks[e], axis=1) == 0) == 0:
+            x1, y1 = peaks[e[0]]
+            x2, y2 = peaks[e[1]]
+            cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie / float(len(edges)), 1.0, 1.0]) * 255, thickness=2)
+
+    for i, keypoint in enumerate(peaks):
+        x, y = keypoint
+        cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
+        if show_number:
+            cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
+    return canvas
+
+# detect hands from the body pose keypoints
+# see https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
+def handDetect(candidate, subset, oriImg):
+    # right hand: wrist 4, elbow 3, shoulder 2
+    # left hand: wrist 7, elbow 6, shoulder 5
+    ratioWristElbow = 0.33
+    detect_result = []
+    image_height, image_width = oriImg.shape[0:2]
+    for person in subset.astype(int):
+        # skip a side if any of its three keypoints is missing
+        has_left = np.sum(person[[5, 6, 7]] == -1) == 0
+        has_right = np.sum(person[[2, 3, 4]] == -1) == 0
+        if not (has_left or has_right):
+            continue
+        hands = []
+        # left hand
+        if has_left:
+            left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
+            x1, y1 = candidate[left_shoulder_index][:2]
+            x2, y2 = candidate[left_elbow_index][:2]
+            x3, y3 = candidate[left_wrist_index][:2]
+            hands.append([x1, y1, x2, y2, x3, y3, True])
+        # right hand
+        if has_right:
+            right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
+            x1, y1 = candidate[right_shoulder_index][:2]
+            x2, y2 = candidate[right_elbow_index][:2]
+            x3, y3 = candidate[right_wrist_index][:2]
+            hands.append([x1, y1, x2, y2, x3, y3, False])
+
+        for x1, y1, x2, y2, x3, y3, is_left in hands:
+            # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbow) = (1 + ratio) * pos_wrist - ratio * pos_elbow
+            # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
+            # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
+            # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
+            # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
+            # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
+            x = x3 + ratioWristElbow * (x3 - x2)
+            y = y3 + ratioWristElbow * (y3 - y2)
+            distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
+            distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
+            width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
+            # x-y refers to the center --> offset to the top-left point
+            # handRectangle.x -= handRectangle.width / 2.f;
+            # handRectangle.y -= handRectangle.height / 2.f;
+            x -= width / 2
+            y -= width / 2  # width = height
+            # clip the box to the image
+            if x < 0: x = 0
+            if y < 0: y = 0
+            width1 = width
+            width2 = width
+            if x + width > image_width: width1 = image_width - x
+            if y + width > image_height: width2 = image_height - y
+            width = min(width1, width2)
+            # discard hand boxes smaller than 20 pixels
+            if width >= 20:
+                detect_result.append([int(x), int(y), int(width), is_left])
+
+    '''
+    return value: [[x, y, w, True if left hand else False]].
+    width = height since the network requires square input.
+    x, y is the coordinate of the top-left corner.
+    '''
+    return detect_result
+
+# get the max index of a 2d array
+def npmax(array):
+    arrayindex = array.argmax(1)
+    arrayvalue = array.max(1)
+    i = arrayvalue.argmax()
+    j = arrayindex[i]
+    return i, j
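
A small worked example of padRightDownCorner, assuming it is imported from this module; the numbers follow directly from the modulo arithmetic above:

import numpy as np
from src.util import padRightDownCorner

# 46 % 8 = 6, so 2 rows of padValue are appended at the bottom;
# 30 % 8 = 6, so 2 columns are appended at the right.
# pad is returned as [up, left, down, right].
img = np.zeros((46, 30, 3), dtype=np.uint8)
padded, pad = padRightDownCorner(img, stride=8, padValue=128)
print(padded.shape)  # (48, 32, 3)
print(pad)           # [0, 0, 2, 2]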
static/poseEditor.js CHANGED
@@ -3,7 +3,7 @@ var canvas = null;
 var ctx = null;
 
 // candidate format: [[x1, y1, score1], [x2, y2, score2], ...]
-const candidateSource = [
+let candidateSource = [
   [235, 158, 0.93167633],
   [234, 220, 0.97106987],
   [193, 222, 0.93366587],
@@ -25,12 +25,12 @@ const candidateSource = [
 ];
 
 // subset format: [[index1, index2, ..., -1], [index1, index2, ..., -1], ...]
-const subset = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 33.81122635, 18]];
+let subset = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 33.81122635, 18]];
 
 // const candidateSource = [[618.00, 0.00], [618.00, 44.00], [304.00, 81.00], [482.00, 96.00], [66.00, 270.00], [171.00, 280.00], [618.00, 82.00], [307.00, 112.00], [460.00, 143.00], [0.00, 301.00], [65.00, 301.00], [172.00, 303.00], [584.00, 86.00], [275.00, 119.00], [420.00, 139.00], [0.00, 301.00], [41.00, 301.00], [144.00, 303.00], [544.00, 131.00], [348.00, 139.00], [262.00, 160.00], [0.00, 337.00], [52.00, 339.00], [130.00, 348.00], [570.00, 175.00], [283.00, 177.00], [78.00, 338.00], [172.00, 380.00], [651.00, 78.00], [338.00, 111.00], [505.00, 144.00], [92.00, 301.00], [198.00, 305.00], [661.00, 132.00], [349.00, 156.00], [541.00, 179.00], [106.00, 336.00], [203.00, 348.00], [305.00, 159.00], [665.00, 160.00], [563.00, 192.00], [80.00, 343.00], [181.00, 385.00], [614.00, 205.00], [291.00, 220.00], [432.00, 320.00], [152.00, 372.00], [43.00, 380.00], [0.00, 386.00], [623.00, 281.00], [306.00, 290.00], [92.00, 357.00], [509.00, 434.00], [304.00, 357.00], [622.00, 368.00], [47.00, 394.00], [0.00, 395.00], [142.00, 405.00], [535.00, 565.00], [655.00, 200.00], [337.00, 217.00], [467.00, 322.00], [191.00, 372.00], [83.00, 375.00], [344.00, 282.00], [655.00, 282.00], [103.00, 343.00], [237.00, 368.00], [22.00, 377.00], [0.00, 379.00], [460.00, 459.00], [305.00, 352.00], [638.00, 355.00], [0.00, 401.00], [110.00, 412.00], [411.00, 570.00], [608.00, 0.00], [608.00, 40.00], [297.00, 75.00], [469.00, 84.00], [0.00, 261.00], [58.00, 263.00], [165.00, 275.00], [625.00, 0.00], [625.00, 39.00], [309.00, 74.00], [486.00, 83.00], [71.00, 264.00], [180.00, 276.00], [599.00, 0.00], [599.00, 44.00], [284.00, 80.00], [440.00, 93.00], [48.00, 271.00], [0.00, 272.00], [157.00, 277.00], [634.00, 0.00], [633.00, 41.00], [319.00, 77.00], [79.00, 269.00], [190.00, 277.00]];
 // const subset = [[1.00,6.00,12.00,18.00,24.00,28.00,33.00,39.00,43.00,49.00,54.00,59.00,65.00,72.00,77.00,84.00,90.00,97.00,32.98,18.00],[5.00,11.00,17.00,23.00,27.00,32.00,37.00,42.00,46.00,-1.00,-1.00,62.00,67.00,-1.00,82.00,88.00,95.00,100.00,25.45,15.00],[4.00,10.00,16.00,22.00,26.00,31.00,36.00,41.00,47.00,51.00,57.00,63.00,66.00,74.00,81.00,87.00,93.00,99.00,26.97,18.00],[3.00,8.00,14.00,19.00,25.00,30.00,35.00,40.00,45.00,52.00,58.00,61.00,70.00,75.00,79.00,86.00,92.00,-1.00,30.45,17.00],[2.00,7.00,13.00,20.00,-1.00,29.00,34.00,38.00,44.00,50.00,53.00,60.00,64.00,71.00,78.00,85.00,91.00,98.00,27.89,17.00],[0.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,-1.00,76.00,83.00,-1.00,96.00,3.33,4.00]];
 
-const candidate = candidateSource.map(point => [point[0], point[1] - 70]);
+let candidate = candidateSource.map(point => [point[0], point[1] - 70]);
 
 
 function clearCanvas() {
@@ -206,7 +206,11 @@ function handleMouseUp(event) {
   isDragging = false;
 }
 
-function initializePose(w,h) {
+function initializePose(jsonData, w, h) {
+  console.log("initializePose");
+  candidate = jsonData.candidate;
+  subset = jsonData.subset;
+
   canvas = document.getElementById('canvas');
   ctx = canvas.getContext('2d');
216