Yuantao Feng committed
Commit 9d96bb5 · 1 Parent(s): 00c0329

Add SFace for face recognition (#3)

* SFace and demo impl
* benchmark support and results for SFace

Files changed:
- README.md +5 -4
- benchmark/benchmark.py +1 -1
- benchmark/config/face_detection_yunet.yaml +1 -1
- benchmark/config/face_recognition_sface.yaml +17 -0
- models/__init__.py +3 -1
- models/face_detection_yunet/demo.py +2 -2
- models/face_recognition_sface/LICENSE +202 -0
- models/face_recognition_sface/README.md +27 -0
- models/face_recognition_sface/demo.py +71 -0
- models/face_recognition_sface/sface.py +165 -0
README.md
CHANGED
@@ -17,7 +17,7 @@ Hardware Setup:
 
 ***Important Notes***:
 - The time data that shown on the following tables presents the time elapsed from preprocess (resize is excluded), to a forward pass of a network, and postprocess to get final results.
-- The time data that shown on the following tables is
+- The time data that shown on the following tables is the median of benchmark runs.
 - View [benchmark/config](./benchmark/config) for more details on benchmarking different models.
 
 <!--
@@ -29,9 +29,10 @@ Hardware Setup:
 -->
 | Model | Input Size | CPU x86_64 (ms) | CPU ARM (ms) |
 |-------|------------|-----------------|--------------|
-| [YuNet](./models/face_detection_yunet)
-| [DB](./models/text_detection_db)
-| [CRNN](./models/text_recognition_crnn)
+| [YuNet](./models/face_detection_yunet) | 160x120 | 2.35 | 8.72 |
+| [DB](./models/text_detection_db) | 640x480 | 137.38 | 2780.78 |
+| [CRNN](./models/text_recognition_crnn) | 100x32 | 50.21 | 234.32 |
+| [SFace](./models/face_recognition_sface) | 112x112 | 8.69 | 96.79 |
 
 
 ## License
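As a side note on the "median of benchmark runs" wording above, here is a minimal sketch of what a median reduction over per-repeat latencies looks like (illustrative numbers, not the benchmark's actual data):

```python
import numpy as np

# Hypothetical per-repeat latencies in milliseconds for one model and input size.
latencies_ms = np.array([2.41, 2.35, 2.38, 2.33, 9.87, 2.36, 2.34, 2.37, 2.35, 2.39])

# Reporting the median rather than the mean keeps a single slow outlier
# (e.g. a cache-cold or preempted run) from skewing the table entry.
print(np.median(latencies_ms))  # 2.365
```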
benchmark/benchmark.py
CHANGED
@@ -78,7 +78,7 @@ class Data:
     def _load_label(self):
         labels = dict.fromkeys(self._files, None)
         for filename in self._files:
-            labels[filename] = np.loadtxt(os.path.join(self._path, '{}.txt'.format(filename[:-4])))
+            labels[filename] = np.loadtxt(os.path.join(self._path, '{}.txt'.format(filename[:-4])), ndmin=2)
         return labels
 
     def __getitem__(self, idx):
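The `ndmin=2` addition matters when a label file contains a single row: by default `np.loadtxt` returns a 1-D array in that case, while `ndmin=2` keeps the result uniformly 2-D so downstream code can iterate over rows either way. A quick illustration:

```python
import io
import numpy as np

single_row = "10 20 30 40\n"

# Without ndmin, a one-line label file collapses to a 1-D array.
a = np.loadtxt(io.StringIO(single_row))
print(a.shape)  # (4,)

# With ndmin=2 the result is always 2-D: one row per label.
b = np.loadtxt(io.StringIO(single_row), ndmin=2)
print(b.shape)  # (1, 4)
```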
benchmark/config/face_detection_yunet.yaml
CHANGED
@@ -1,7 +1,7 @@
 Benchmark:
   name: "Face Detection Benchmark"
   data:
-    path: "benchmark/data/face"
+    path: "benchmark/data/face/detection"
     files: ["group.jpg", "concerts.jpg", "dance.jpg"]
   metric:
     sizes: # [[w1, h1], ...], Omit to run at original scale
benchmark/config/face_recognition_sface.yaml
ADDED
@@ -0,0 +1,17 @@
+Benchmark:
+  name: "Face Recognition Benchmark"
+  data:
+    path: "benchmark/data/face/recognition"
+    files: ["Aaron_Tippin_0001.jpg", "Alvaro_Uribe_0028.jpg", "Alvaro_Uribe_0029.jpg", "Jose_Luis_Rodriguez_Zapatero_0001.jpg"]
+    useLabel: True
+  metric: # 'sizes' is omitted since this model requires input of fixed size
+    warmup: 3
+    repeat: 10
+    batchSize: 1
+    reduction: 'median'
+  backend: "default"
+  target: "cpu"
+
+Model:
+  name: "SFace"
+  modelPath: "models/face_recognition_sface/face_recognition_sface.onnx"
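For orientation, a minimal sketch of how a config like this could be consumed with PyYAML; the actual loader in benchmark/benchmark.py may differ, and the `MODELS.get(...)` call at the end is only illustrative of the registry pattern, not a confirmed API:

```python
import yaml  # PyYAML

with open('benchmark/config/face_recognition_sface.yaml') as f:
    cfg = yaml.safe_load(f)

bench_cfg = cfg['Benchmark']
model_cfg = cfg['Model']
print(bench_cfg['data']['path'])   # "benchmark/data/face/recognition"
print(model_cfg['name'])           # "SFace"
print(model_cfg['modelPath'])      # path to the ONNX model

# Hypothetical registry lookup in the spirit of models/__init__.py:
# model = MODELS.get(model_cfg['name'])(modelPath=model_cfg['modelPath'])
```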
models/__init__.py
CHANGED
@@ -1,6 +1,7 @@
 from .face_detection_yunet.yunet import YuNet
 from .text_detection_db.db import DB
 from .text_recognition_crnn.crnn import CRNN
+from .face_recognition_sface.sface import SFace
 
 class Registery:
     def __init__(self, name):
@@ -16,4 +17,5 @@ class Registery:
 MODELS = Registery('Models')
 MODELS.register(YuNet)
 MODELS.register(DB)
-MODELS.register(CRNN)
+MODELS.register(CRNN)
+MODELS.register(SFace)
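The body of `Registery` sits outside this hunk; the sketch below shows the pattern the registrations above rely on, with every field and method name assumed rather than taken from the file:

```python
class Registery:
    """Name-to-class mapping so callers can look a model up by its class
    name (a sketch of the apparent pattern; the real class may differ)."""
    def __init__(self, name):
        self._name = name
        self._items = {}

    def register(self, cls):
        self._items[cls.__name__] = cls

    def get(self, key):
        return self._items[key]


class SFace:  # stand-in for models.face_recognition_sface.sface.SFace
    pass


MODELS = Registery('Models')
MODELS.register(SFace)
assert MODELS.get('SFace') is SFace
```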
models/face_detection_yunet/demo.py
CHANGED
@@ -77,8 +77,8 @@ if __name__ == '__main__':
     # Print results
     print('{} faces detected.'.format(results.shape[0]))
     for idx, det in enumerate(results):
-        print('{}:
-        idx, det[
+        print('{}: {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}'.format(
+            idx, *det[:-1])
         )
 
     # Draw results on the input image
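Reading the new format string together with `det[:-1]`, each YuNet detection row appears to carry 14 geometric values (a 4-value bounding box plus five (x, y) landmarks) followed by a confidence score; this is an inference from the diff, not a documented guarantee. A toy row illustrating that layout:

```python
import numpy as np

# Hypothetical detection row: x, y, w, h, five landmark pairs, then score.
det = np.array([50.0, 40.0, 120.0, 150.0,   # bounding box
                80.0, 90.0, 140.0, 92.0,    # two eye landmarks
                110.0, 120.0,               # nose tip
                85.0, 150.0, 135.0, 152.0,  # mouth corners
                0.98])                      # confidence score
coords, score = det[:-1], det[-1]
print(len(coords), score)  # 14 0.98 -- matches the 14 '{:.0f}' fields above
```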
models/face_recognition_sface/LICENSE
ADDED
@@ -0,0 +1,202 @@
+[Full text of the Apache License, Version 2.0 (January 2004), http://www.apache.org/licenses/, added verbatim, including the standard "Copyright [yyyy] [name of copyright owner]" appendix boilerplate.]
models/face_recognition_sface/README.md
ADDED
@@ -0,0 +1,27 @@
+# SFace
+
+SFace: Sigmoid-Constrained Hypersphere Loss for Robust Face Recognition
+
+SFace is contributed by [Yaoyao Zhong](https://github.com/zhongyy/SFace). [face_recognition_sface.onnx](./face_recognition_sface.onnx) is converted from the model from https://github.com/zhongyy/SFace thanks to [Chengrui Wang](https://github.com/crywang).
+
+Note:
+- There is [a PR for OpenCV adding this model](https://github.com/opencv/opencv/pull/20422) to work with OpenCV DNN in C++ implementation.
+- Support 5-landmark warp for now.
+- `demo.py` requires [../face_detection_yunet](../face_detection_yunet) to run.
+
+## Demo
+
+Run the following command to try the demo:
+```shell
+# recognize on images
+python demo.py --input1 /path/to/image1 --input2 /path/to/image2
+```
+
+## License
+
+All files in this directory are licensed under [Apache 2.0 License](./LICENSE).
+
+## Reference
+
+- https://ieeexplore.ieee.org/document/9318547
+- https://github.com/zhongyy/SFace
models/face_recognition_sface/demo.py
ADDED
@@ -0,0 +1,71 @@
+# This file is part of OpenCV Zoo project.
+# It is subject to the license terms in the LICENSE file found in the same directory.
+#
+# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+# Third party copyrights are property of their respective owners.
+
+import sys
+import argparse
+
+import numpy as np
+import cv2 as cv
+
+from sface import SFace
+
+sys.path.append('../face_detection_yunet')
+from yunet import YuNet
+
+def str2bool(v):
+    if v.lower() in ['on', 'yes', 'true', 'y', 't']:
+        return True
+    elif v.lower() in ['off', 'no', 'false', 'n', 'f']:
+        return False
+    else:
+        raise NotImplementedError
+
+parser = argparse.ArgumentParser(
+    description="SFace: Sigmoid-Constrained Hypersphere Loss for Robust Face Recognition (https://ieeexplore.ieee.org/document/9318547)")
+parser.add_argument('--input1', '-i1', type=str, help='Path to the input image 1.')
+parser.add_argument('--input2', '-i2', type=str, help='Path to the input image 2.')
+parser.add_argument('--model', '-m', type=str, default='face_recognition_sface.onnx', help='Path to the model.')
+parser.add_argument('--dis_type', type=int, choices=[0, 1], default=0, help='Distance type. \'0\': cosine, \'1\': norm_l1.')
+parser.add_argument('--save', '-s', type=str, default=False, help='Set true to save results. This flag is invalid when using camera.')
+parser.add_argument('--vis', '-v', type=str2bool, default=True, help='Set true to open a window for result visualization. This flag is invalid when using camera.')
+args = parser.parse_args()
+
+if __name__ == '__main__':
+    # Instantiate SFace for face recognition
+    recognizer = SFace(modelPath=args.model)
+    # Instantiate YuNet for face detection
+    detector = YuNet(modelPath='../face_detection_yunet/face_detection_yunet.onnx',
+                     inputSize=[320, 320],
+                     confThreshold=0.9,
+                     nmsThreshold=0.3,
+                     topK=5000,
+                     keepTopK=750)
+
+    img1 = cv.imread(args.input1)
+    img2 = cv.imread(args.input2)
+
+    # Detect faces
+    detector.setInputSize([img1.shape[1], img1.shape[0]])
+    face1 = detector.infer(img1)
+    assert face1.shape[0] > 0, 'Cannot find a face in {}'.format(args.input1)
+    detector.setInputSize([img2.shape[1], img2.shape[0]])
+    face2 = detector.infer(img2)
+    assert face2.shape[0] > 0, 'Cannot find a face in {}'.format(args.input2)
+
+    # Match
+    distance = recognizer.match(img1, face1[0][:-1], img2, face2[0][:-1], args.dis_type)
+    print(distance)
+    if args.dis_type == 0:
+        dis_type = 'Cosine'
+        threshold = 0.363
+        result = 'same identity' if distance >= threshold else 'different identity'
+    elif args.dis_type == 1:
+        dis_type = 'Norm-L2'
+        threshold = 1.128
+        result = 'same identity' if distance <= threshold else 'different identity'
+    else:
+        raise NotImplementedError()
+    print('Using {} distance, threshold {}: {}.'.format(dis_type, threshold, result))
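The two branches above point in opposite directions: a cosine score counts as a match when it is at or above the threshold, while an L2 distance counts as a match when it is at or below it. A small standalone restatement of that decision rule (thresholds copied from the demo; the helper name is illustrative):

```python
def same_identity(score, dis_type):
    """Interpret a SFace match score.

    dis_type 0: cosine similarity, higher means more similar.
    dis_type 1: L2 distance, lower means closer.
    """
    if dis_type == 0:
        return score >= 0.363
    if dis_type == 1:
        return score <= 1.128
    raise NotImplementedError

print(same_identity(0.52, 0))  # True  (cosine above the 0.363 threshold)
print(same_identity(1.40, 1))  # False (L2 distance above the 1.128 threshold)
```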
models/face_recognition_sface/sface.py
ADDED
@@ -0,0 +1,165 @@
+# This file is part of OpenCV Zoo project.
+# It is subject to the license terms in the LICENSE file found in the same directory.
+#
+# Copyright (C) 2021, Shenzhen Institute of Artificial Intelligence and Robotics for Society, all rights reserved.
+# Third party copyrights are property of their respective owners.
+
+import numpy as np
+import cv2 as cv
+
+from _testcapi import FLT_MIN
+
+class SFace:
+    def __init__(self, modelPath):
+        self._model = cv.dnn.readNet(modelPath)
+        self._input_size = [112, 112]
+        self._dst = np.array([
+            [38.2946, 51.6963],
+            [73.5318, 51.5014],
+            [56.0252, 71.7366],
+            [41.5493, 92.3655],
+            [70.7299, 92.2041]
+        ], dtype=np.float32)
+        self._dst_mean = np.array([56.0262, 71.9008], dtype=np.float32)
+
+    @property
+    def name(self):
+        return self.__class__.__name__
+
+    def setBackend(self, backend_id):
+        self._model.setPreferableBackend(backend_id)
+
+    def setTarget(self, target_id):
+        self._model.setPreferableTarget(target_id)
+
+    def _preprocess(self, image, bbox):
+        aligned_image = self._alignCrop(image, bbox)
+        return cv.dnn.blobFromImage(aligned_image)
+
+    def infer(self, image, bbox):
+        # Preprocess
+        inputBlob = self._preprocess(image, bbox)
+
+        # Forward
+        self._model.setInput(inputBlob)
+        outputBlob = self._model.forward()
+
+        # Postprocess
+        results = self._postprocess(outputBlob)
+
+        return results
+
+    def _postprocess(self, outputBlob):
+        return outputBlob / cv.norm(outputBlob)
+
+    def match(self, image1, face1, image2, face2, dis_type=0):
+        feature1 = self.infer(image1, face1)
+        feature2 = self.infer(image2, face2)
+
+        if dis_type == 0: # COSINE
+            return np.sum(feature1 * feature2)
+        elif dis_type == 1: # NORM_L2
+            return cv.norm(feature1, feature2)
+        else:
+            raise NotImplementedError()
+
+    def _alignCrop(self, image, face):
+        # Retrieve landmarks
+        if face.shape[-1] == (4 + 5 * 2):
+            landmarks = face[4:].reshape(5, 2)
+        else:
+            raise NotImplementedError()
+        warp_mat = self._getSimilarityTransformMatrix(landmarks)
+        aligned_image = cv.warpAffine(image, warp_mat, self._input_size, flags=cv.INTER_LINEAR)
+        return aligned_image
+
+    def _getSimilarityTransformMatrix(self, src):
+        # compute the mean of src and dst
+        src_mean = np.array([np.mean(src[:, 0]), np.mean(src[:, 1])], dtype=np.float32)
+        dst_mean = np.array([56.0262, 71.9008], dtype=np.float32)
+        # subtract the means from src and dst
+        src_demean = src.copy()
+        src_demean[:, 0] = src_demean[:, 0] - src_mean[0]
+        src_demean[:, 1] = src_demean[:, 1] - src_mean[1]
+        dst_demean = self._dst.copy()
+        dst_demean[:, 0] = dst_demean[:, 0] - dst_mean[0]
+        dst_demean[:, 1] = dst_demean[:, 1] - dst_mean[1]
+
+        A = np.array([[0., 0.], [0., 0.]], dtype=np.float64)
+        for i in range(5):
+            A[0][0] += dst_demean[i][0] * src_demean[i][0]
+            A[0][1] += dst_demean[i][0] * src_demean[i][1]
+            A[1][0] += dst_demean[i][1] * src_demean[i][0]
+            A[1][1] += dst_demean[i][1] * src_demean[i][1]
+        A = A / 5
+
+        d = np.array([1.0, 1.0], dtype=np.float64)
+        if A[0][0] * A[1][1] - A[0][1] * A[1][0] < 0:
+            d[1] = -1
+
+        T = np.array([
+            [1.0, 0.0, 0.0],
+            [0.0, 1.0, 0.0],
+            [0.0, 0.0, 1.0]
+        ], dtype=np.float64)
+
+        s, u, vt = cv.SVDecomp(A)
+        smax = s[0][0] if s[0][0] > s[1][0] else s[1][0]
+        tol = smax * 2 * FLT_MIN
+        rank = int(0)
+        if s[0][0] > tol:
+            rank += 1
+        if s[1][0] > tol:
+            rank += 1
+        det_u = u[0][0] * u[1][1] - u[0][1] * u[1][0]
+        det_vt = vt[0][0] * vt[1][1] - vt[0][1] * vt[1][0]
+        if rank == 1:
+            if det_u * det_vt > 0:
+                uvt = np.matmul(u, vt)
+                T[0][0] = uvt[0][0]
+                T[0][1] = uvt[0][1]
+                T[1][0] = uvt[1][0]
+                T[1][1] = uvt[1][1]
+            else:
+                temp = d[1]
+                d[1] = -1
+                D = np.array([[d[0], 0.0], [0.0, d[1]]], dtype=np.float64)
+                Dvt = np.matmul(D, vt)
+                uDvt = np.matmul(u, Dvt)
+                T[0][0] = uDvt[0][0]
+                T[0][1] = uDvt[0][1]
+                T[1][0] = uDvt[1][0]
+                T[1][1] = uDvt[1][1]
+                d[1] = temp
+        else:
+            D = np.array([[d[0], 0.0], [0.0, d[1]]], dtype=np.float64)
+            Dvt = np.matmul(D, vt)
+            uDvt = np.matmul(u, Dvt)
+            T[0][0] = uDvt[0][0]
+            T[0][1] = uDvt[0][1]
+            T[1][0] = uDvt[1][0]
+            T[1][1] = uDvt[1][1]
+
+        var1 = 0.0
+        var2 = 0.0
+        for i in range(5):
+            var1 += src_demean[i][0] * src_demean[i][0]
+            var2 += src_demean[i][1] * src_demean[i][1]
+        var1 /= 5
+        var2 /= 5
+
+        scale = 1.0 / (var1 + var2) * (s[0][0] * d[0] + s[1][0] * d[1])
+        TS = [
+            T[0][0] * src_mean[0] + T[0][1] * src_mean[1],
+            T[1][0] * src_mean[0] + T[1][1] * src_mean[1]
+        ]
+        T[0][2] = dst_mean[0] - scale * TS[0]
+        T[1][2] = dst_mean[1] - scale * TS[1]
+        T[0][0] *= scale
+        T[0][1] *= scale
+        T[1][0] *= scale
+        T[1][1] *= scale
+        return np.array([
+            [T[0][0], T[0][1], T[0][2]],
+            [T[1][0], T[1][1], T[1][2]]
+        ], dtype=np.float64)
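Because `_postprocess` L2-normalizes each embedding, the cosine score computed in `match` is just a dot product of unit vectors, and the L2 distance is tied to it algebraically. A small numeric check of that relationship, using toy 3-D vectors in place of the real SFace feature blobs:

```python
import numpy as np

def l2_normalize(v):
    return v / np.linalg.norm(v)

# Toy vectors standing in for two SFace feature blobs.
f1 = l2_normalize(np.array([0.3, -1.2, 0.8]))
f2 = l2_normalize(np.array([0.2, -1.0, 0.9]))

cosine = np.sum(f1 * f2)              # what match() computes for dis_type=0
l2 = np.linalg.norm(f1 - f2)          # analogue of cv.norm(feature1, feature2)
print(cosine, l2)

# For unit vectors the two are tied together: ||f1 - f2||^2 == 2 - 2 * cosine.
print(np.isclose(l2 ** 2, 2 - 2 * cosine))  # True
```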