hysts (HF staff) committed
Commit: 0654980
Parent: be3384a
Files changed (7)
  1. .pre-commit-config.yaml +59 -34
  2. .style.yapf +0 -5
  3. .vscode/settings.json +30 -0
  4. README.md +1 -1
  5. app.py +57 -47
  6. requirements.txt +1 -1
  7. style.css +8 -0
.pre-commit-config.yaml CHANGED
@@ -1,35 +1,60 @@
 repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.2.0
-    hooks:
-      - id: check-executables-have-shebangs
-      - id: check-json
-      - id: check-merge-conflict
-      - id: check-shebang-scripts-are-executable
-      - id: check-toml
-      - id: check-yaml
-      - id: double-quote-string-fixer
-      - id: end-of-file-fixer
-      - id: mixed-line-ending
-        args: ['--fix=lf']
-      - id: requirements-txt-fixer
-      - id: trailing-whitespace
-  - repo: https://github.com/myint/docformatter
-    rev: v1.4
-    hooks:
-      - id: docformatter
-        args: ['--in-place']
-  - repo: https://github.com/pycqa/isort
-    rev: 5.12.0
-    hooks:
-      - id: isort
-  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.991
-    hooks:
-      - id: mypy
-        args: ['--ignore-missing-imports']
-  - repo: https://github.com/google/yapf
-    rev: v0.32.0
-    hooks:
-      - id: yapf
-        args: ['--parallel', '--in-place']
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.5.0
+    hooks:
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-shebang-scripts-are-executable
+      - id: check-toml
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: mixed-line-ending
+        args: ["--fix=lf"]
+      - id: requirements-txt-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/myint/docformatter
+    rev: v1.7.5
+    hooks:
+      - id: docformatter
+        args: ["--in-place"]
+  - repo: https://github.com/pycqa/isort
+    rev: 5.13.2
+    hooks:
+      - id: isort
+        args: ["--profile", "black"]
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.8.0
+    hooks:
+      - id: mypy
+        args: ["--ignore-missing-imports"]
+        additional_dependencies:
+          [
+            "types-python-slugify",
+            "types-requests",
+            "types-PyYAML",
+            "types-pytz",
+          ]
+  - repo: https://github.com/psf/black
+    rev: 24.2.0
+    hooks:
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.7.1
+    hooks:
+      - id: nbstripout
+        args:
+          [
+            "--extra-keys",
+            "metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
+          ]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.7.1
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
+      - id: nbqa-isort
+        args: ["--float-to-top"]
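Note: the mypy hook runs in an isolated environment, so the "additional_dependencies" stub packages above are what let it type-check calls into third-party libraries. A minimal sketch of the kind of annotated code these stubs allow the hook to verify (the function and its use of requests are illustrative, not taken from this repo):

import requests  # checked against the "types-requests" stubs listed above


def fetch_text(url: str, timeout: float = 10.0) -> str:
    # With the stubs installed, mypy can confirm the argument types
    # and that .text really is a str return value.
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()
    return response.text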
.style.yapf DELETED
@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
.vscode/settings.json ADDED
@@ -0,0 +1,30 @@
+{
+  "editor.formatOnSave": true,
+  "files.insertFinalNewline": false,
+  "[python]": {
+    "editor.defaultFormatter": "ms-python.black-formatter",
+    "editor.formatOnType": true,
+    "editor.codeActionsOnSave": {
+      "source.organizeImports": "explicit"
+    }
+  },
+  "[jupyter]": {
+    "files.insertFinalNewline": false
+  },
+  "black-formatter.args": [
+    "--line-length=119"
+  ],
+  "isort.args": ["--profile", "black"],
+  "flake8.args": [
+    "--max-line-length=119"
+  ],
+  "ruff.lint.args": [
+    "--line-length=119"
+  ],
+  "notebook.output.scrolling": true,
+  "notebook.formatOnCellExecution": true,
+  "notebook.formatOnSave.enabled": true,
+  "notebook.codeActionsOnSave": {
+    "source.organizeImports": "explicit"
+  }
+}
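Note: the "--line-length=119" and "--max-line-length=119" arguments keep black, flake8, and ruff agreeing on a single limit, and "isort.args": ["--profile", "black"] makes editor-triggered import sorting match black's style. A small illustration of what the organize-imports action on save produces, using hypothetical imports rather than this repo's:

# Before organizing: a single unsorted line such as
#     import sys, os, json
# After isort with the black profile: one import per line, alphabetized,
# with the standard library grouped before third-party packages.
import json
import os
import sys

import numpy as np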
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🐢
 colorFrom: yellow
 colorTo: indigo
 sdk: gradio
-sdk_version: 3.36.1
+sdk_version: 4.19.2
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -18,31 +18,29 @@ import PIL.Image
 import torch
 import torchvision.transforms as T
 
-sys.path.insert(0, 'anime_face_landmark_detection')
+sys.path.insert(0, "anime_face_landmark_detection")
 
 from CFA import CFA
 
-DESCRIPTION = '# [kanosawa/anime_face_landmark_detection](https://github.com/kanosawa/anime_face_landmark_detection)'
+DESCRIPTION = "# [kanosawa/anime_face_landmark_detection](https://github.com/kanosawa/anime_face_landmark_detection)"
 
 NUM_LANDMARK = 24
 CROP_SIZE = 128
 
 
 def load_sample_image_paths() -> list[pathlib.Path]:
-    image_dir = pathlib.Path('images')
+    image_dir = pathlib.Path("images")
     if not image_dir.exists():
-        dataset_repo = 'hysts/sample-images-TADNE'
-        path = huggingface_hub.hf_hub_download(dataset_repo,
-                                               'images.tar.gz',
-                                               repo_type='dataset')
+        dataset_repo = "hysts/sample-images-TADNE"
+        path = huggingface_hub.hf_hub_download(dataset_repo, "images.tar.gz", repo_type="dataset")
         with tarfile.open(path) as f:
             f.extractall()
-    return sorted(image_dir.glob('*'))
+    return sorted(image_dir.glob("*"))
 
 
 def load_face_detector() -> cv2.CascadeClassifier:
-    url = 'https://raw.githubusercontent.com/nagadomi/lbpcascade_animeface/master/lbpcascade_animeface.xml'
-    path = pathlib.Path('lbpcascade_animeface.xml')
+    url = "https://raw.githubusercontent.com/nagadomi/lbpcascade_animeface/master/lbpcascade_animeface.xml"
+    path = pathlib.Path("lbpcascade_animeface.xml")
     if not path.exists():
         urllib.request.urlretrieve(url, path.as_posix())
     return cv2.CascadeClassifier(path.as_posix())
@@ -50,8 +48,8 @@ def load_face_detector() -> cv2.CascadeClassifier:
 
 def load_landmark_detector(device: torch.device) -> torch.nn.Module:
     path = huggingface_hub.hf_hub_download(
-        'public-data/anime_face_landmark_detection',
-        'checkpoint_landmark_191116.pth')
+        "public-data/anime_face_landmark_detection", "checkpoint_landmark_191116.pth"
+    )
     model = CFA(output_channel_num=NUM_LANDMARK + 1, checkpoint_name=path)
     model.to(device)
     model.eval()
@@ -59,15 +57,16 @@ def load_landmark_detector(device: torch.device) -> torch.nn.Module:
 
 
 @torch.inference_mode()
-def detect(image_path: str, face_detector: cv2.CascadeClassifier,
-           device: torch.device, transform: Callable,
-           landmark_detector: torch.nn.Module) -> np.ndarray:
+def detect(
+    image_path: str,
+    face_detector: cv2.CascadeClassifier,
+    device: torch.device,
+    transform: Callable,
+    landmark_detector: torch.nn.Module,
+) -> np.ndarray:
     image = cv2.imread(image_path)
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-    preds = face_detector.detectMultiScale(gray,
-                                           scaleFactor=1.1,
-                                           minNeighbors=5,
-                                           minSize=(24, 24))
+    preds = face_detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(24, 24))
 
     image_h, image_w = image.shape[:2]
     pil_image = PIL.Image.fromarray(image[:, :, ::-1].copy())
@@ -93,48 +92,59 @@ def detect(image_path: str, face_detector: cv2.CascadeClassifier,
     cv2.rectangle(res, (x0, y0), (x1, y1), (0, 255, 0), 2)
 
     for i in range(NUM_LANDMARK):
-        heatmap = cv2.resize(heatmaps[i], (CROP_SIZE, CROP_SIZE),
-                             interpolation=cv2.INTER_CUBIC)
+        heatmap = cv2.resize(heatmaps[i], (CROP_SIZE, CROP_SIZE), interpolation=cv2.INTER_CUBIC)
         pty, ptx = np.unravel_index(np.argmax(heatmap), heatmap.shape)
-        pt_crop = np.round(np.array([ptx * w, pty * h]) /
-                           CROP_SIZE).astype(int)
+        pt_crop = np.round(np.array([ptx * w, pty * h]) / CROP_SIZE).astype(int)
         pt = np.array([x0, y0]) + pt_crop
         cv2.circle(res, tuple(pt), 2, (0, 0, 255), cv2.FILLED)
 
     return res[:, :, ::-1]
 
 
-device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 image_paths = load_sample_image_paths()
 examples = [[path.as_posix()] for path in image_paths]
 
 face_detector = load_face_detector()
 landmark_detector = load_landmark_detector(device)
-transform = T.Compose([
-    T.ToTensor(),
-    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
-])
-
-fn = functools.partial(detect,
-                       face_detector=face_detector,
-                       device=device,
-                       transform=transform,
-                       landmark_detector=landmark_detector)
-
-with gr.Blocks(css='style.css') as demo:
+transform = T.Compose(
+    [
+        T.ToTensor(),
+        T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+    ]
+)
+
+fn = functools.partial(
+    detect,
+    face_detector=face_detector,
+    device=device,
+    transform=transform,
+    landmark_detector=landmark_detector,
+)
+
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     with gr.Row():
         with gr.Column():
-            image = gr.Image(label='Input', type='filepath')
-            run_button = gr.Button('Run')
+            image = gr.Image(label="Input", type="filepath")
+            run_button = gr.Button("Run")
         with gr.Column():
-            result = gr.Image(label='Result')
-
-    gr.Examples(examples=examples,
-                inputs=image,
-                outputs=result,
-                fn=fn,
-                cache_examples=os.getenv('CACHE_EXAMPLES') == '1')
-    run_button.click(fn=fn, inputs=image, outputs=result, api_name='predict')
-demo.queue(max_size=15).launch()
+            result = gr.Image(label="Result")
+
+    gr.Examples(
+        examples=examples,
+        inputs=image,
+        outputs=result,
+        fn=fn,
+        cache_examples=os.getenv("CACHE_EXAMPLES") == "1",
+    )
+    run_button.click(
+        fn=fn,
+        inputs=image,
+        outputs=result,
+        api_name="predict",
+    )
+
+if __name__ == "__main__":
+    demo.queue(max_size=15).launch()
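Note: after this refactor, the "fn" partial bundles the face detector, device, transform, and landmark detector around detect(), so the pipeline can also be exercised without the Gradio UI. A minimal sketch, assuming it is placed under the __main__ guard in app.py and that the sample images have already been downloaded (the file name is illustrative):

    # fn only needs the image path; detect() returns an RGB ndarray
    # with the face boxes and landmark points drawn in.
    rgb = fn("images/sample.jpg")
    PIL.Image.fromarray(rgb).save("result.png")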
requirements.txt CHANGED
@@ -1,3 +1,3 @@
-opencv-python-headless>=4.7.0.72
+opencv-python-headless>=4.9.0.80
 torch==2.0.1
 torchvision==0.15.2
style.css CHANGED
@@ -1,3 +1,11 @@
 h1 {
   text-align: center;
+  display: block;
+}
+
+#duplicate-button {
+  margin: auto;
+  color: #fff;
+  background: #1565c0;
+  border-radius: 100vh;
 }