tranzmatt committed
Commit 8503a3c
1 Parent(s): fed77dc

Initial Upload

Files changed (6)
  1. .gitignore +163 -0
  2. .gitmodules +3 -0
  3. LICENSE +21 -0
  4. openpose-converter.py +316 -0
  5. requirements.txt +8 -0
  6. setup.py +8 -0
.gitignore ADDED
@@ -0,0 +1,163 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ model/
+ .idea
.gitmodules ADDED
@@ -0,0 +1,3 @@
+ [submodule "openpose"]
+ 	path = openpose
+ 	url = https://github.com/tranzmatt/openpose-library.git
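
Because the pose-estimation code is vendored as the `openpose` submodule declared above, a plain clone will not have it checked out, and the imports in openpose-converter.py will fail until it is fetched. A minimal sketch using standard git commands (the clone URL placeholder stands in for this repository's own remote):

    git clone --recursive <this-repo-url>
    # or, inside an existing clone:
    git submodule update --init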
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
openpose-converter.py ADDED
@@ -0,0 +1,316 @@
+ #!/usr/bin/env python3
+ # This code is from https://github.com/lllyasviel/ControlNet
+
+ import os
+ import argparse
+ import cv2
+ import numpy as np
+ import math
+ import glob
+ from scipy.ndimage import gaussian_filter
+ import matplotlib.pyplot as plt
+ import torch
+
+ import urllib.request
+ import openpose.util as util
+ from openpose.model import bodypose_model
+ import json
+
+
+ class Body(object):
+     def __init__(self, the_model_path):
+         self.model = bodypose_model()
+         if torch.cuda.is_available():
+             self.model = self.model.cuda()
+         model_dict = util.transfer(self.model, torch.load(the_model_path))
+         self.model.load_state_dict(model_dict)
+         self.model.eval()
+
+     def __call__(self, ori_img):
+         # scale_search = [0.5, 1.0, 1.5, 2.0]
+         scale_search = [0.5]
+         boxsize = 368
+         stride = 8
+         padValue = 128
+         threshold1 = 0.1
+         threshold2 = 0.05
+         multiplier = [x * boxsize / ori_img.shape[0] for x in scale_search]
+         heatmap_avg = np.zeros((ori_img.shape[0], ori_img.shape[1], 19))
+         paf_avg = np.zeros((ori_img.shape[0], ori_img.shape[1], 38))
+
+         for m in range(len(multiplier)):
+             scale = multiplier[m]
+             imageToTest = cv2.resize(ori_img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
+             imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
+             im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
+             im = np.ascontiguousarray(im)
+
+             data = torch.from_numpy(im).float()
+             if torch.cuda.is_available():
+                 data = data.cuda()
+             # data = data.permute([2, 0, 1]).unsqueeze(0).float()
+             with torch.no_grad():
+                 Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
+             Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
+             Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
+
+             # extract outputs, resize, and remove padding
+             # output 1 is heatmaps
+             heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0))
+             heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
+             heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+             heatmap = cv2.resize(heatmap, (ori_img.shape[1], ori_img.shape[0]), interpolation=cv2.INTER_CUBIC)
+
+             # output 0 is PAFs
+             paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0))
+             paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
+             paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+             paf = cv2.resize(paf, (ori_img.shape[1], ori_img.shape[0]), interpolation=cv2.INTER_CUBIC)
+
+             # average the maps over all search scales
+             heatmap_avg = heatmap_avg + heatmap / len(multiplier)
+             paf_avg = paf_avg + paf / len(multiplier)
+
+         all_peaks = []
+         peak_counter = 0
+
+         for part in range(18):
+             map_ori = heatmap_avg[:, :, part]
+             one_heatmap = gaussian_filter(map_ori, sigma=3)
+
+             map_left = np.zeros(one_heatmap.shape)
+             map_left[1:, :] = one_heatmap[:-1, :]
+             map_right = np.zeros(one_heatmap.shape)
+             map_right[:-1, :] = one_heatmap[1:, :]
+             map_up = np.zeros(one_heatmap.shape)
+             map_up[:, 1:] = one_heatmap[:, :-1]
+             map_down = np.zeros(one_heatmap.shape)
+             map_down[:, :-1] = one_heatmap[:, 1:]
+
+             peaks_binary = np.logical_and.reduce(
+                 (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up,
+                  one_heatmap >= map_down, one_heatmap > threshold1))
+             peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse
+             peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
+             peak_id = range(peak_counter, peak_counter + len(peaks))
+             peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
+
+             all_peaks.append(peaks_with_score_and_id)
+             peak_counter += len(peaks)
+
+         # find connection in the specified sequence, center 29 is in the position 15
+         limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
+                    [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
+                    [1, 16], [16, 18], [3, 17], [6, 18]]
+         # the middle joints heatmap correspondence
+         mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22],
+                   [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52],
+                   [55, 56], [37, 38], [45, 46]]
+
+         connection_all = []
+         special_k = []
+         mid_num = 10
+
+         for k in range(len(mapIdx)):
+             score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
+             candA = all_peaks[limbSeq[k][0] - 1]
+             candB = all_peaks[limbSeq[k][1] - 1]
+             nA = len(candA)
+             nB = len(candB)
+             # indexA, indexB = limbSeq[k]
+             if nA != 0 and nB != 0:
+                 connection_candidate = []
+                 for i in range(nA):
+                     for j in range(nB):
+                         vec = np.subtract(candB[j][:2], candA[i][:2])
+                         norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
+                         norm = max(0.001, norm)
+                         vec = np.divide(vec, norm)
+
+                         startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
+                                             np.linspace(candA[i][1], candB[j][1], num=mid_num)))
+
+                         vec_x = np.array([score_mid[int(round(startend[index][1])), int(round(startend[index][0])), 0]
+                                           for index in range(len(startend))])
+                         vec_y = np.array([score_mid[int(round(startend[index][1])), int(round(startend[index][0])), 1]
+                                           for index in range(len(startend))])
+
+                         score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
+                         score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
+                             0.5 * ori_img.shape[0] / norm - 1, 0)
+                         criterion1 = len(np.nonzero(score_midpts > threshold2)[0]) > 0.8 * len(score_midpts)
+                         criterion2 = score_with_dist_prior > 0
+                         if criterion1 and criterion2:
+                             connection_candidate.append(
+                                 [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
+
+                 connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
+                 connection = np.zeros((0, 5))
+                 for c in range(len(connection_candidate)):
+                     i, j, s = connection_candidate[c][0:3]
+                     if i not in connection[:, 3] and j not in connection[:, 4]:
+                         connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
+                         if len(connection) >= min(nA, nB):
+                             break
+
+                 connection_all.append(connection)
+             else:
+                 special_k.append(k)
+                 connection_all.append([])
+
+         # last number in each row is the total parts number of that person
+         # the second last number in each row is the score of the overall configuration
+         subset = -1 * np.ones((0, 20))
+         candidate = np.array([item for sublist in all_peaks for item in sublist])
+
+         for k in range(len(mapIdx)):
+             if k not in special_k:
+                 partAs = connection_all[k][:, 0]
+                 partBs = connection_all[k][:, 1]
+                 indexA, indexB = np.array(limbSeq[k]) - 1
+
+                 for i in range(len(connection_all[k])):  # = 1:size(temp,1)
+                     found = 0
+                     subset_idx = [-1, -1]
+                     for j in range(len(subset)):  # 1:size(subset,1):
+                         if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
+                             subset_idx[found] = j
+                             found += 1
+
+                     if found == 1:
+                         j = subset_idx[0]
+                         if subset[j][indexB] != partBs[i]:
+                             subset[j][indexB] = partBs[i]
+                             subset[j][-1] += 1
+                             subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
+                     elif found == 2:  # if found 2 and disjoint, merge them
+                         j1, j2 = subset_idx
+                         membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
+                         if len(np.nonzero(membership == 2)[0]) == 0:  # merge
+                             subset[j1][:-2] += (subset[j2][:-2] + 1)
+                             subset[j1][-2:] += subset[j2][-2:]
+                             subset[j1][-2] += connection_all[k][i][2]
+                             subset = np.delete(subset, j2, 0)
+                         else:  # as like found == 1
+                             subset[j1][indexB] = partBs[i]
+                             subset[j1][-1] += 1
+                             subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
+
+                     # if no partA is found in the subset, create a new subset
+                     elif not found and k < 17:
+                         row = -1 * np.ones(20)
+                         row[indexA] = partAs[i]
+                         row[indexB] = partBs[i]
+                         row[-1] = 2
+                         row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
+                         subset = np.vstack([subset, row])
+
+         # delete rows of subset in which too few parts occur
+         deleteIdx = []
+         for i in range(len(subset)):
+             if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
+                 deleteIdx.append(i)
+         subset = np.delete(subset, deleteIdx, axis=0)
+
+         # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
+         # candidate: x, y, score, id
+         return candidate, subset
+
+
+ def get_pose_json(ori_img):
+     height, width, channels = ori_img.shape
+
+     # note: relies on the module-level body_estimation and runs the model
+     # a second time on the same image
+     candidate, subset = body_estimation(ori_img)
+
+     if len(candidate) == 0 or len(subset) == 0:
+         print("No poses found in the input image.")
+         return None
+
+     candidate_int = candidate.astype(int)
+     candidate_list = candidate_int[:, :2].tolist()
+
+     data = {
+         "width": width,
+         "height": height,
+         "keypoints": candidate_list
+     }
+     candidate_json = json.dumps(data)
+
+     return candidate_json
+
+
+ def process_image(this_input_image, the_body_estimation, these_args):
+     output_filename = '.'.join(this_input_image.split('.')[:-1]) + '.openpose.png'
+     if os.path.isfile(output_filename) and not these_args.force:
+         print(f"Output file {output_filename} already exists, skipping {this_input_image}")
+         return
+
+     ori_img = cv2.imread(this_input_image)  # B,G,R order
+
+     try:
+         candidate, subset = the_body_estimation(ori_img)
+     except Exception as e:
+         print(f"Error processing image {this_input_image}: {e}")
+         return
+
+     if len(candidate) == 0 or len(subset) == 0:
+         print(f"No poses found in the input image {this_input_image}.")
+         return
+
+     if these_args.json_output:
+         # write the JSON under its own name so the PNG written below
+         # does not overwrite the .json path
+         candidate_json = get_pose_json(ori_img)
+         if candidate_json:
+             json_filename = '.'.join(this_input_image.split('.')[:-1]) + '.openpose.json'
+             with open(json_filename, 'w') as f:
+                 f.write(candidate_json)
+
+     # draw the detected skeleton on a black canvas the size of the input
+     canvas = np.zeros_like(ori_img)
+     canvas = util.draw_bodypose(canvas, candidate, subset)
+
+     cv2.imwrite(output_filename, canvas)
+
+     if these_args.show_image:
+         plt.imshow(canvas[:, :, [2, 1, 0]])
+         plt.show()
+
+
+ if __name__ == "__main__":
+
+     parser = argparse.ArgumentParser(description='Body Pose Estimation using OpenPose', add_help=True)
+     group = parser.add_mutually_exclusive_group(required=True)
+     group.add_argument("-i", "--input_image", help="Path to the input image", type=str)
+     group.add_argument("-d", "--directory", help="Directory to search for images", type=str)
+     parser.add_argument("-p", "--patterns", help="Comma-separated patterns to match for images in directory",
+                         type=str)
+     parser.add_argument("-r", "--recursive", help="Search for files in subdirectories recursively",
+                         action="store_true")
+     parser.add_argument("-s", "--show_image", help="Display the output image", action="store_true")
+     parser.add_argument("-j", "--json_output", help="Save JSON output to file", action="store_true")
+     parser.add_argument("-f", "--force", help="Force processing of images even if output file already exists",
+                         action="store_true")
+     args = parser.parse_args()
+
+     script_path = os.path.abspath(__file__)
+     script_dir = os.path.dirname(script_path)
+
+     model_dir = os.path.join(script_dir, "model")
+     os.makedirs(model_dir, exist_ok=True)
+
+     model_path = os.path.join(model_dir, "body_pose_model.pth")
+
+     # download the pretrained body-pose weights on first run
+     if not os.path.isfile(model_path):
+         body_model_path = \
+             "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth"
+         urllib.request.urlretrieve(body_model_path, model_path)
+
+     body_estimation = Body(model_path)
+
+     if args.input_image:
+         print(f"Processing {args.input_image}")
+         process_image(args.input_image, body_estimation, args)
+     else:
+         if not args.patterns:
+             parser.error("--patterns is required when --directory is used")
+         patterns = args.patterns.split(',')
+         for pattern in patterns:
+             for input_image in glob.iglob(os.path.join(args.directory, '**', pattern)
+                                           if args.recursive else os.path.join(args.directory, pattern),
+                                           recursive=args.recursive):
+                 print(f"Processing {input_image}")
+                 process_image(input_image, body_estimation, args)
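
For reference, the argparse block above yields invocations like `python openpose-converter.py -i photo.jpg -j` for one image, or `python openpose-converter.py -d images -p '*.jpg,*.png' -r` for a directory tree. The JSON written by `get_pose_json` is a flat object with `width`, `height`, and a `keypoints` list of `[x, y]` pixel pairs, so a consumer needs only the standard library. A minimal sketch of reading it back (the name `photo.openpose.json` is just an example of the script's `<image>.openpose.json` convention):

    import json

    # read the sidecar file produced by openpose-converter.py -j
    with open("photo.openpose.json") as f:
        pose = json.load(f)

    # normalize pixel coordinates to [0, 1] using the recorded image size
    norm = [(x / pose["width"], y / pose["height"]) for x, y in pose["keypoints"]]
    print(len(norm), "keypoints; first:", norm[0] if norm else None)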
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ setuptools
+ opencv-python~=4.7.0.72
+ numpy~=1.23.5
+ matplotlib~=3.7.1
+ scipy~=1.10.1
+ --extra-index-url https://download.pytorch.org/whl/cu118
+ torch~=2.0.1+cu118
+ torchvision~=0.15.2+cu118
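
pip honors the `--extra-index-url` line when the file is installed directly, so `pip install -r requirements.txt` is enough to pull the CUDA 11.8 builds of torch and torchvision (assuming a CUDA 11.8-capable environment; CPU-only setups would need to swap the index and drop the `+cu118` pins).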
setup.py ADDED
@@ -0,0 +1,8 @@
+ from setuptools import setup, find_packages
+
+ setup(
+     name='openpose-converter',
+     version='0.0.1',
+     packages=find_packages(),
+     package_data={'openpose': ['*']}
+ )
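
Since `package_data={'openpose': ['*']}` ships the submodule's files with the package, an editable install of the checkout, e.g. `pip install -e .`, should suffice for development (assuming the `openpose` submodule has been initialized so that `find_packages()` can see it).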