hysts (HF staff) committed
Commit: 63849a0
Parent: ea10cf7
Files changed (4):
  1. .pre-commit-config.yaml +35 -0
  2. .style.yapf +5 -0
  3. README.md +1 -1
  4. app.py +61 -92
.pre-commit-config.yaml ADDED
@@ -0,0 +1,35 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.2.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: double-quote-string-fixer
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ['--fix=lf']
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/myint/docformatter
+    rev: v1.4
+    hooks:
+      - id: docformatter
+        args: ['--in-place']
+  - repo: https://github.com/pycqa/isort
+    rev: 5.12.0
+    hooks:
+      - id: isort
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v0.991
+    hooks:
+      - id: mypy
+        args: ['--ignore-missing-imports']
+  - repo: https://github.com/google/yapf
+    rev: v0.32.0
+    hooks:
+      - id: yapf
+        args: ['--parallel', '--in-place']
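
These hooks run over the staged files on every git commit once the hook script is installed in the clone. As an illustrative sketch (not part of the commit, and assuming the pre-commit package is available on PATH), the same suite can be driven from Python with the standard pre-commit install / pre-commit run --all-files CLI, using the same shlex/subprocess pattern that app.py itself uses:

import shlex
import subprocess

# Register the git hook once, then run every hook configured in
# .pre-commit-config.yaml over the whole repository instead of only the
# files staged for the next commit.
subprocess.call(shlex.split('pre-commit install'))
subprocess.call(shlex.split('pre-commit run --all-files'))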
.style.yapf ADDED
@@ -0,0 +1,5 @@
+[style]
+based_on_style = pep8
+blank_line_before_nested_class_or_def = false
+spaces_before_comment = 2
+split_before_logical_operator = true
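
The three non-default knobs control blank lines around nested definitions, inline-comment padding, and where long boolean expressions wrap. A minimal sketch of exercising them through yapf's Python API, assuming yapf 0.32 as pinned above (illustrative, not part of the commit):

# Illustrative: format a snippet with the [style] settings above.
# FormatCode returns (formatted_source, changed) in yapf 0.32.
from yapf.yapflib.yapf_api import FormatCode

source = (
    'def outer():\n'
    '\n'
    '    def inner():   # nested def\n'
    '        return True and False\n'
    '    return inner\n'
)
formatted, changed = FormatCode(source, style_config='.style.yapf')
print(formatted)
# blank_line_before_nested_class_or_def=false means yapf will not force a
# blank line before "def inner()", spaces_before_comment=2 pads inline
# comments to at least two spaces before the "#", and
# split_before_logical_operator=true puts line breaks before "and"/"or"
# when a condition has to wrap.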
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🔥
 colorFrom: gray
 colorTo: gray
 sdk: gradio
-sdk_version: 3.0.5
+sdk_version: 3.19.1
 app_file: app.py
 pinned: false
 ---
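
The only change here is the sdk_version bump from Gradio 3.0.5 to 3.19.1, and it is what drives the app.py rewrite below: newer Gradio 3.x releases favour top-level components over the deprecated gr.inputs / gr.outputs namespaces, take an initial value via value= (the Radio components below switch from default='10g' to value='10g'), and the new code queues requests with .queue() rather than launch(enable_queue=...). A minimal before/after sketch of that mapping, based on the diff that follows (illustrative, not part of the commit; the lambda is a stand-in for the real detect function):

import gradio as gr

# Old, Gradio 2.x-style namespaces used by the previous app.py:
#   gr.inputs.Radio(model_sizes, type='value', default='10g', label='Model')
#   gr.Interface(...).launch(enable_queue=...)

# Gradio 3.19-style equivalent used by the new app.py:
demo = gr.Interface(
    fn=lambda size: f'selected {size}',  # stand-in for detect()
    inputs=gr.Radio(label='Model', choices=['500m', '10g'], value='10g'),
    outputs=gr.Textbox(label='Output'),
)
demo.queue().launch(show_api=False)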
app.py CHANGED
@@ -2,26 +2,28 @@
 
 from __future__ import annotations
 
-import argparse
 import functools
 import os
 import pathlib
+import shlex
 import subprocess
 import sys
 import urllib.request
 
 if os.environ.get('SYSTEM') == 'spaces':
     import mim
-    mim.install('mmcv-full==1.3.3', is_yes=True)
+    mim.install('mmcv-full==1.4', is_yes=True)
 
-    subprocess.call('pip uninstall -y opencv-python'.split())
-    subprocess.call('pip uninstall -y opencv-python-headless'.split())
-    subprocess.call('pip install opencv-python-headless==4.5.5.64'.split())
-    subprocess.call('pip install terminaltables==3.1.0'.split())
-    subprocess.call('pip install mmpycocotools==12.0.3'.split())
-
-    subprocess.call('pip install insightface==0.6.2'.split())
+    subprocess.call(shlex.split('pip uninstall -y opencv-python'))
+    subprocess.call(shlex.split('pip uninstall -y opencv-python-headless'))
+    subprocess.call(
+        shlex.split('pip install opencv-python-headless==4.5.5.64'))
+    subprocess.call(shlex.split('pip install terminaltables==3.1.0'))
+    subprocess.call(shlex.split('pip install mmpycocotools==12.0.3'))
+
+    subprocess.call(shlex.split('pip install insightface==0.6.2'))
+    subprocess.call(shlex.split('sed -i 23,26d __init__.py'),
+                    cwd='insightface/detection/scrfd/mmdet')
 
 import cv2
 import gradio as gr
@@ -36,32 +38,15 @@ from mmdet.apis import inference_detector, init_detector, show_result_pyplot
 
 TITLE = 'insightface Face Detection (SCRFD)'
 DESCRIPTION = 'This is an unofficial demo for https://github.com/deepinsight/insightface/tree/master/detection/scrfd.'
-ARTICLE = '<center><img src="https://visitor-badge.glitch.me/badge?page_id=hysts.insightface-scrfd" alt="visitor badge"/></center>'
-
-TOKEN = os.environ['TOKEN']
-
 
-def parse_args() -> argparse.Namespace:
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--face-score-slider-step', type=float, default=0.05)
-    parser.add_argument('--face-score-threshold', type=float, default=0.3)
-    parser.add_argument('--device', type=str, default='cpu')
-    parser.add_argument('--theme', type=str)
-    parser.add_argument('--live', action='store_true')
-    parser.add_argument('--share', action='store_true')
-    parser.add_argument('--port', type=int)
-    parser.add_argument('--disable-queue',
-                        dest='enable_queue',
-                        action='store_false')
-    parser.add_argument('--allow-flagging', type=str, default='never')
-    return parser.parse_args()
+HF_TOKEN = os.getenv('HF_TOKEN')
 
 
 def load_model(model_size: str, device) -> nn.Module:
     ckpt_path = huggingface_hub.hf_hub_download(
         'hysts/insightface',
         f'models/scrfd_{model_size}/model.pth',
-        use_auth_token=TOKEN)
+        use_auth_token=HF_TOKEN)
     scrfd_dir = 'insightface/detection/scrfd'
     config_path = f'{scrfd_dir}/configs/scrfd/scrfd_{model_size}.py'
     model = init_detector(config_path, ckpt_path, device.type)
@@ -73,15 +58,15 @@ def update_test_pipeline(model: nn.Module, mode: int):
     pipelines = cfg.data.test.pipeline
     for pipeline in pipelines:
         if pipeline.type == 'MultiScaleFlipAug':
-            if mode == 0:  #640 scale
+            if mode == 0:  # 640 scale
                 pipeline.img_scale = (640, 640)
                 if hasattr(pipeline, 'scale_factor'):
                     del pipeline.scale_factor
-            elif mode == 1:  #for single scale in other pages
+            elif mode == 1:  # for single scale in other pages
                 pipeline.img_scale = (1100, 1650)
                 if hasattr(pipeline, 'scale_factor'):
                     del pipeline.scale_factor
-            elif mode == 2:  #original scale
+            elif mode == 2:  # original scale
                 pipeline.img_scale = None
                 pipeline.scale_factor = 1.0
             transforms = pipeline.transforms
@@ -122,64 +107,48 @@ def detect(image: np.ndarray, model_size: str, mode: int,
     return res
 
 
-def main():
-    args = parse_args()
-    device = torch.device(args.device)
-
-    model_sizes = [
-        '500m',
-        '1g',
-        '2.5g',
-        '10g',
-        '34g',
-    ]
-    detectors = {
-        model_size: load_model(model_size, device=device)
-        for model_size in model_sizes
-    }
-    modes = [
-        '(640, 640)',
-        '(1100, 1650)',
-        'original',
-    ]
-
-    func = functools.partial(detect, detectors=detectors)
-    func = functools.update_wrapper(func, detect)
-
-    image_path = pathlib.Path('selfie.jpg')
-    if not image_path.exists():
-        url = 'https://raw.githubusercontent.com/peiyunh/tiny/master/data/demo/selfie.jpg'
-        urllib.request.urlretrieve(url, image_path)
-    examples = [[image_path.as_posix(), '10g', modes[0], 0.3]]
-
-    gr.Interface(
-        func,
-        [
-            gr.inputs.Image(type='numpy', label='Input'),
-            gr.inputs.Radio(
-                model_sizes, type='value', default='10g', label='Model'),
-            gr.inputs.Radio(
-                modes, type='index', default=modes[0], label='Mode'),
-            gr.inputs.Slider(0,
-                             1,
-                             step=args.face_score_slider_step,
-                             default=args.face_score_threshold,
-                             label='Face Score Threshold'),
-        ],
-        gr.outputs.Image(type='numpy', label='Output'),
-        examples=examples,
-        title=TITLE,
-        description=DESCRIPTION,
-        article=ARTICLE,
-        theme=args.theme,
-        allow_flagging=args.allow_flagging,
-        live=args.live,
-    ).launch(
-        enable_queue=args.enable_queue,
-        server_port=args.port,
-        share=args.share,
-    )
-
-
-if __name__ == '__main__':
-    main()
+device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+
+model_sizes = [
+    '500m',
+    '1g',
+    '2.5g',
+    '10g',
+    '34g',
+]
+detectors = {
+    model_size: load_model(model_size, device=device)
+    for model_size in model_sizes
+}
+modes = [
+    '(640, 640)',
+    '(1100, 1650)',
+    'original',
+]
+
+func = functools.partial(detect, detectors=detectors)
+
+image_path = pathlib.Path('selfie.jpg')
+if not image_path.exists():
+    url = 'https://raw.githubusercontent.com/peiyunh/tiny/master/data/demo/selfie.jpg'
+    urllib.request.urlretrieve(url, image_path)
+examples = [[image_path.as_posix(), '10g', modes[0], 0.3]]
+
+gr.Interface(
+    fn=func,
+    inputs=[
+        gr.Image(label='Input', type='numpy'),
+        gr.Radio(label='Model', choices=model_sizes, type='value',
+                 value='10g'),
+        gr.Radio(label='Mode', choices=modes, type='index', value=modes[0]),
+        gr.Slider(label='Face Score Threshold',
+                  minimum=0,
+                  maximum=1,
+                  step=0.05,
+                  default=0.3),
+    ],
+    outputs=gr.Image(label='Output', type='numpy'),
+    examples=examples,
    title=TITLE,
+    description=DESCRIPTION,
+).queue().launch(show_api=False)
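
One small but deliberate change in the setup block above is swapping '...'.split() for shlex.split(...) when building the argument lists passed to subprocess.call. str.split() breaks on every space, while shlex.split() honours shell-style quoting, so an argument that contains spaces survives as a single element. For the space-free commands used here the two are equivalent; shlex.split is simply the safer habit. A quick illustration (not part of the commit; the command string is hypothetical):

import shlex

# Hypothetical command whose last argument contains a space.
cmd = 'pip install "my package==1.0"'

print(cmd.split())
# ['pip', 'install', '"my', 'package==1.0"']  -- quoted argument split in two
print(shlex.split(cmd))
# ['pip', 'install', 'my package==1.0']       -- quoting respected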