hysts (HF staff) committed
Commit 15d0ea9
1 parent: f0b7f4a
Files changed (6)
  1. .pre-commit-config.yaml +59 -34
  2. .style.yapf +0 -5
  3. .vscode/settings.json +30 -0
  4. README.md +1 -1
  5. app.py +37 -38
  6. requirements.txt +3 -3
.pre-commit-config.yaml CHANGED
@@ -1,35 +1,60 @@
 repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v4.2.0
-  hooks:
-  - id: check-executables-have-shebangs
-  - id: check-json
-  - id: check-merge-conflict
-  - id: check-shebang-scripts-are-executable
-  - id: check-toml
-  - id: check-yaml
-  - id: double-quote-string-fixer
-  - id: end-of-file-fixer
-  - id: mixed-line-ending
-    args: ['--fix=lf']
-  - id: requirements-txt-fixer
-  - id: trailing-whitespace
-- repo: https://github.com/myint/docformatter
-  rev: v1.4
-  hooks:
-  - id: docformatter
-    args: ['--in-place']
-- repo: https://github.com/pycqa/isort
-  rev: 5.12.0
-  hooks:
-  - id: isort
-- repo: https://github.com/pre-commit/mirrors-mypy
-  rev: v0.991
-  hooks:
-  - id: mypy
-    args: ['--ignore-missing-imports']
-- repo: https://github.com/google/yapf
-  rev: v0.32.0
-  hooks:
-  - id: yapf
-    args: ['--parallel', '--in-place']
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/myint/docformatter
+    rev: v1.7.5
+    hooks:
+      - id: docformatter
+        args: ["--in-place"]
+  - repo: https://github.com/pycqa/isort
+    rev: 5.13.2
+    hooks:
+      - id: isort
+        args: ["--profile", "black"]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.10.0
+    hooks:
+      - id: mypy
+        args: ["--ignore-missing-imports"]
+        additional_dependencies:
+          [
+            "types-python-slugify",
+            "types-requests",
+            "types-PyYAML",
+            "types-pytz",
+          ]
+  - repo: https://github.com/psf/black
+    rev: 24.4.2
+    hooks:
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.7.1
+    hooks:
+      - id: nbstripout
+        args:
+          [
+            "--extra-keys",
+            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
+          ]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.8.5
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
+      - id: nbqa-isort
+        args: ["--float-to-top"]
.style.yapf DELETED
@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
.vscode/settings.json ADDED
@@ -0,0 +1,30 @@
+{
+  "editor.formatOnSave": true,
+  "files.insertFinalNewline": false,
+  "[python]": {
+    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.formatOnType": true,
+    "editor.codeActionsOnSave": {
+      "source.organizeImports": "explicit"
+    }
+  },
+  "[jupyter]": {
+    "files.insertFinalNewline": false
+  },
+  "black-formatter.args": [
+    "--line-length=119"
+  ],
+  "isort.args": ["--profile", "black"],
+  "flake8.args": [
+    "--max-line-length=119"
+  ],
+  "ruff.lint.args": [
+    "--line-length=119"
+  ],
+  "notebook.output.scrolling": true,
+  "notebook.formatOnCellExecution": true,
+  "notebook.formatOnSave.enabled": true,
+  "notebook.codeActionsOnSave": {
+    "source.organizeImports": "explicit"
+  }
+}
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 👁
 colorFrom: blue
 colorTo: gray
 sdk: gradio
-sdk_version: 3.36.1
+sdk_version: 4.33.0
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -12,26 +12,32 @@ mp_drawing = mp.solutions.drawing_utils
 mp_drawing_styles = mp.solutions.drawing_styles
 mp_pose = mp.solutions.pose

-TITLE = 'MediaPipe Human Pose Estimation'
-DESCRIPTION = 'https://google.github.io/mediapipe/'
+TITLE = "MediaPipe Human Pose Estimation"
+DESCRIPTION = "https://google.github.io/mediapipe/"


-def run(image: np.ndarray, model_complexity: int, enable_segmentation: bool,
-        min_detection_confidence: float, background_color: str) -> np.ndarray:
+def run(
+    image: np.ndarray,
+    model_complexity: int,
+    enable_segmentation: bool,
+    min_detection_confidence: float,
+    background_color: str,
+) -> np.ndarray:
     with mp_pose.Pose(
-            static_image_mode=True,
-            model_complexity=model_complexity,
-            enable_segmentation=enable_segmentation,
-            min_detection_confidence=min_detection_confidence) as pose:
+        static_image_mode=True,
+        model_complexity=model_complexity,
+        enable_segmentation=enable_segmentation,
+        min_detection_confidence=min_detection_confidence,
+    ) as pose:
         results = pose.process(image)

         res = image[:, :, ::-1].copy()
         if enable_segmentation:
-            if background_color == 'white':
+            if background_color == "white":
                 bg_color = 255
-            elif background_color == 'black':
+            elif background_color == "black":
                 bg_color = 0
-            elif background_color == 'green':
+            elif background_color == "green":
                 bg_color = (0, 255, 0)  # type: ignore
             else:
                 raise ValueError
@@ -41,43 +47,36 @@ def run(image: np.ndarray, model_complexity: int, enable_segmentation: bool,
             else:
                 res[:] = bg_color

-        mp_drawing.draw_landmarks(res,
-                                  results.pose_landmarks,
-                                  mp_pose.POSE_CONNECTIONS,
-                                  landmark_drawing_spec=mp_drawing_styles.
-                                  get_default_pose_landmarks_style())
+        mp_drawing.draw_landmarks(
+            res,
+            results.pose_landmarks,
+            mp_pose.POSE_CONNECTIONS,
+            landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style(),
+        )

         return res[:, :, ::-1]


 model_complexities = list(range(3))
-background_colors = ['white', 'black', 'green']
+background_colors = ["white", "black", "green"]

-image_paths = sorted(pathlib.Path('images').rglob('*.jpg'))
-examples = [[path, model_complexities[1], True, 0.5, background_colors[0]]
-            for path in image_paths]
+image_paths = sorted(pathlib.Path("images").rglob("*.jpg"))
+examples = [[path, model_complexities[1], True, 0.5, background_colors[0]] for path in image_paths]

-gr.Interface(
+demo = gr.Interface(
     fn=run,
     inputs=[
-        gr.Image(label='Input', type='numpy'),
-        gr.Radio(label='Model Complexity',
-                 choices=model_complexities,
-                 type='index',
-                 value=model_complexities[1]),
-        gr.Checkbox(label='Enable Segmentation', value=True),
-        gr.Slider(label='Minimum Detection Confidence',
-                  minimum=0,
-                  maximum=1,
-                  step=0.05,
-                  value=0.5),
-        gr.Radio(label='Background Color',
-                 choices=background_colors,
-                 type='value',
-                 value=background_colors[0]),
+        gr.Image(label="Input", type="numpy"),
+        gr.Radio(label="Model Complexity", choices=model_complexities, type="index", value=model_complexities[1]),
+        gr.Checkbox(label="Enable Segmentation", value=True),
+        gr.Slider(label="Minimum Detection Confidence", minimum=0, maximum=1, step=0.05, value=0.5),
+        gr.Radio(label="Background Color", choices=background_colors, type="value", value=background_colors[0]),
     ],
-    outputs=gr.Image(label='Output', height=500),
+    outputs=gr.Image(label="Output"),
     examples=examples,
     title=TITLE,
     description=DESCRIPTION,
-).queue().launch()
+)
+
+if __name__ == "__main__":
+    demo.queue().launch()
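
With this change app.py no longer launches the interface at import time: the Interface is bound to demo and demo.queue().launch() only runs under the __main__ guard. A minimal sketch, not part of this commit, of exercising the refactored run() headlessly; the image path is hypothetical and the pinned dependencies are assumed to be installed:

# Sketch only: drive run() without the Gradio UI. "sample.jpg" is a placeholder path.
import cv2
import numpy as np

from app import run  # importing app builds `demo` but, after this commit, does not launch it

bgr = cv2.imread("sample.jpg")
assert bgr is not None, "replace with a real image path"
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)  # run() expects an RGB array, as gr.Image(type="numpy") provides

out = run(rgb, model_complexity=1, enable_segmentation=True,
          min_detection_confidence=0.5, background_color="white")
out_bgr = cv2.cvtColor(np.ascontiguousarray(out), cv2.COLOR_RGB2BGR)  # run() returns a reversed RGB view
cv2.imwrite("out.jpg", out_bgr)
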
requirements.txt CHANGED
@@ -1,3 +1,3 @@
-mediapipe==0.10.1
-numpy==1.23.5
-opencv-python-headless==4.8.0.74
+mediapipe==0.10.14
+numpy==1.26.4
+opencv-python-headless==4.10.0.82
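
Since the three pins move together, a quick sanity check after reinstalling is to confirm the runtime versions match; note that opencv-python-headless only reports the 4.10.0 prefix through cv2.__version__. A small sketch:

# Sketch: verify the environment matches the updated pins.
import cv2
import mediapipe
import numpy

print("mediapipe", mediapipe.__version__)  # expected: 0.10.14
print("numpy", numpy.__version__)          # expected: 1.26.4
print("opencv", cv2.__version__)           # expected: 4.10.0 (package pin 4.10.0.82)
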