Dev-mohamed committed on
Commit 677c57e
1 Parent(s): 6dfda4d
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. DeepFace.py +585 -0
  2. LICENSE +21 -0
  3. Makefile +8 -0
  4. Train.py +55 -0
  5. __init__.py +1 -0
  6. __pycache__/DeepFace.cpython-312.pyc +0 -0
  7. __pycache__/__init__.cpython-312.pyc +0 -0
  8. api/__init__.py +0 -0
  9. api/__pycache__/__init__.cpython-312.pyc +0 -0
  10. api/postman/deepface-api.postman_collection.json +102 -0
  11. api/src/__init__.py +0 -0
  12. api/src/__pycache__/__init__.cpython-312.pyc +0 -0
  13. api/src/__pycache__/app.cpython-312.pyc +0 -0
  14. api/src/api.py +10 -0
  15. api/src/app.py +11 -0
  16. api/src/modules/__init__.py +0 -0
  17. api/src/modules/__pycache__/__init__.cpython-312.pyc +0 -0
  18. api/src/modules/core/__init__.py +0 -0
  19. api/src/modules/core/__pycache__/__init__.cpython-312.pyc +0 -0
  20. api/src/modules/core/__pycache__/routes.cpython-312.pyc +0 -0
  21. api/src/modules/core/__pycache__/service.cpython-312.pyc +0 -0
  22. api/src/modules/core/routes.py +207 -0
  23. api/src/modules/core/service.py +84 -0
  24. basemodels/ArcFace.py +179 -0
  25. basemodels/DeepID.py +99 -0
  26. basemodels/Dlib.py +89 -0
  27. basemodels/Facenet.py +1715 -0
  28. basemodels/FbDeepFace.py +105 -0
  29. basemodels/GhostFaceNet.py +312 -0
  30. basemodels/OpenFace.py +397 -0
  31. basemodels/SFace.py +87 -0
  32. basemodels/VGGFace.py +160 -0
  33. basemodels/__init__.py +0 -0
  34. basemodels/__pycache__/ArcFace.cpython-312.pyc +0 -0
  35. basemodels/__pycache__/DeepID.cpython-312.pyc +0 -0
  36. basemodels/__pycache__/Dlib.cpython-312.pyc +0 -0
  37. basemodels/__pycache__/Facenet.cpython-312.pyc +0 -0
  38. basemodels/__pycache__/FbDeepFace.cpython-312.pyc +0 -0
  39. basemodels/__pycache__/GhostFaceNet.cpython-312.pyc +0 -0
  40. basemodels/__pycache__/OpenFace.cpython-312.pyc +0 -0
  41. basemodels/__pycache__/SFace.cpython-312.pyc +0 -0
  42. basemodels/__pycache__/VGGFace.cpython-312.pyc +0 -0
  43. basemodels/__pycache__/__init__.cpython-312.pyc +0 -0
  44. commons/__init__.py +0 -0
  45. commons/__pycache__/__init__.cpython-312.pyc +0 -0
  46. commons/__pycache__/file_utils.cpython-312.pyc +0 -0
  47. commons/__pycache__/folder_utils.cpython-312.pyc +0 -0
  48. commons/__pycache__/image_utils.cpython-312.pyc +0 -0
  49. commons/__pycache__/logger.cpython-312.pyc +0 -0
  50. commons/__pycache__/os_path.cpython-312.pyc +0 -0
DeepFace.py ADDED
@@ -0,0 +1,585 @@
+ # common dependencies
+ import os
+ import warnings
+ import logging
+ from typing import Any, Dict, List, Union, Optional
+ from deepface.commons.os_path import os_path
+
+ # this has to be set before importing tensorflow
+ os.environ["TF_USE_LEGACY_KERAS"] = "1"
+
+ # pylint: disable=wrong-import-position
+
+ # 3rd party dependencies
+ import numpy as np
+ import pandas as pd
+ import tensorflow as tf
+
+ # package dependencies
+ from deepface.commons import package_utils, folder_utils
+ from deepface.commons import logger as log
+ from deepface.modules import (
+     modeling,
+     representation,
+     verification,
+     recognition,
+     demography,
+     detection,
+     streaming,
+     preprocessing,
+     cloudservice,
+ )
+ from deepface import __version__
+
+ logger = log.get_singletonish_logger()
+
+ # -----------------------------------
+ # configurations for dependencies
+
+ # users should install the tf_keras package if they are using tf 2.16 or later
+ package_utils.validate_for_keras3()
+
+ warnings.filterwarnings("ignore")
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+ tf_version = package_utils.get_tf_major_version()
+ if tf_version == 2:
+     tf.get_logger().setLevel(logging.ERROR)
+ # -----------------------------------
+
+ # create required folders if necessary to store model weights
+ folder_utils.initialize_folder()
+
+
+ def build_model(model_name: str) -> Any:
+     """
+     Build a deepface model.
+     Args:
+         model_name (str): face recognition or facial attribute model.
+             VGG-Face, Facenet, OpenFace, DeepFace, DeepID for face recognition;
+             Age, Gender, Emotion, Race for facial attributes.
+     Returns:
+         built_model
+     """
+     return modeling.build_model(model_name=model_name)
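For reference, a minimal usage sketch of build_model as exposed above (weights are downloaded on first use; the model name must be one of the documented options):

from deepface import DeepFace

# build a recognition model once; deepface caches it internally for reuse
model = DeepFace.build_model("VGG-Face")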
+
+
+ def verify(
+     img1_path: Union[str, np.ndarray, List[float]],
+     img2_path: Union[str, np.ndarray, List[float]],
+     model_name: str = "VGG-Face",
+     detector_backend: str = "opencv",
+     distance_metric: str = "cosine",
+     enforce_detection: bool = True,
+     align: bool = True,
+     expand_percentage: int = 0,
+     normalization: str = "base",
+     silent: bool = False,
+ ) -> Dict[str, Any]:
+     """
+     Verify whether an image pair represents the same person or different persons.
+     Args:
+         img1_path (str or np.ndarray or List[float]): Path to the first image.
+             Accepts an exact image path as a string, a numpy array (BGR), a base64
+             encoded image, or pre-calculated embeddings.
+
+         img2_path (str or np.ndarray or List[float]): Path to the second image.
+             Accepts an exact image path as a string, a numpy array (BGR), a base64
+             encoded image, or pre-calculated embeddings.
+
+         model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
+             OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
+
+         detector_backend (str): face detector backend. Options: 'opencv', 'retinaface',
+             'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
+             (default is opencv).
+
+         distance_metric (str): Metric for measuring similarity. Options: 'cosine',
+             'euclidean', 'euclidean_l2' (default is cosine).
+
+         enforce_detection (bool): If no face is detected in an image, raise an exception.
+             Set to False to avoid the exception for low-resolution images (default is True).
+
+         align (bool): Flag to enable face alignment (default is True).
+
+         expand_percentage (int): expand the detected facial area by a percentage (default is 0).
+
+         normalization (str): Normalize the input image before feeding it to the model.
+             Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace (default is base).
+
+         silent (bool): Suppress or allow some log messages for a quieter analysis process
+             (default is False).
+
+     Returns:
+         result (dict): A dictionary containing verification results with the following keys:
+
+         - 'verified' (bool): Indicates whether the images represent the same person (True)
+             or different persons (False).
+
+         - 'distance' (float): The distance measure between the face vectors.
+             A lower distance indicates higher similarity.
+
+         - 'max_threshold_to_verify' (float): The maximum threshold used for verification.
+             If the distance is below this threshold, the images are considered a match.
+
+         - 'model' (str): The chosen face recognition model.
+
+         - 'distance_metric' (str): The chosen similarity metric for measuring distances.
+
+         - 'facial_areas' (dict): Rectangular regions of interest for faces in both images.
+             - 'img1': {'x': int, 'y': int, 'w': int, 'h': int}
+                 Region of interest for the first image.
+             - 'img2': {'x': int, 'y': int, 'w': int, 'h': int}
+                 Region of interest for the second image.
+
+         - 'time' (float): Time taken for the verification process in seconds.
+     """
+
+     return verification.verify(
+         img1_path=img1_path,
+         img2_path=img2_path,
+         model_name=model_name,
+         detector_backend=detector_backend,
+         distance_metric=distance_metric,
+         enforce_detection=enforce_detection,
+         align=align,
+         expand_percentage=expand_percentage,
+         normalization=normalization,
+         silent=silent,
+     )
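A minimal verification sketch against the signature above; the image paths are placeholders:

from deepface import DeepFace

result = DeepFace.verify(img1_path="img1.jpg", img2_path="img2.jpg")
# result carries the keys documented above
print(result["verified"], result["distance"])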
+
+
+ def analyze(
+     img_path: Union[str, np.ndarray],
+     actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
+     enforce_detection: bool = True,
+     detector_backend: str = "opencv",
+     align: bool = True,
+     expand_percentage: int = 0,
+     silent: bool = False,
+ ) -> List[Dict[str, Any]]:
+     """
+     Analyze facial attributes such as age, gender, emotion, and race in the provided image.
+     Args:
+         img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
+             or a base64 encoded image. If the source image contains multiple faces, the result
+             will include information for each detected face.
+
+         actions (tuple): Attributes to analyze. The default is ('emotion', 'age', 'gender', 'race').
+             You can exclude some of these attributes from the analysis if needed.
+
+         enforce_detection (bool): If no face is detected in an image, raise an exception.
+             Set to False to avoid the exception for low-resolution images (default is True).
+
+         detector_backend (str): face detector backend. Options: 'opencv', 'retinaface',
+             'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
+             (default is opencv).
+
+         align (bool): Perform alignment based on the eye positions (default is True).
+
+         expand_percentage (int): expand the detected facial area by a percentage (default is 0).
+
+         silent (bool): Suppress or allow some log messages for a quieter analysis process
+             (default is False).
+
+     Returns:
+         results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary represents
+             the analysis results for a detected face. Each dictionary in the list contains the
+             following keys:
+
+         - 'region' (dict): Represents the rectangular region of the detected face in the image.
+             - 'x': x-coordinate of the top-left corner of the face.
+             - 'y': y-coordinate of the top-left corner of the face.
+             - 'w': Width of the detected face region.
+             - 'h': Height of the detected face region.
+
+         - 'age' (float): Estimated age of the detected face.
+
+         - 'face_confidence' (float): Confidence score for the detected face.
+             Indicates the reliability of the face detection.
+
+         - 'dominant_gender' (str): The dominant gender in the detected face.
+             Either "Man" or "Woman".
+
+         - 'gender' (dict): Confidence scores for each gender category.
+             - 'Man': Confidence score for the male gender.
+             - 'Woman': Confidence score for the female gender.
+
+         - 'dominant_emotion' (str): The dominant emotion in the detected face.
+             Possible values include "sad", "angry", "surprise", "fear", "happy",
+             "disgust" and "neutral".
+
+         - 'emotion' (dict): Confidence scores for each emotion category.
+             - 'sad': Confidence score for sadness.
+             - 'angry': Confidence score for anger.
+             - 'surprise': Confidence score for surprise.
+             - 'fear': Confidence score for fear.
+             - 'happy': Confidence score for happiness.
+             - 'disgust': Confidence score for disgust.
+             - 'neutral': Confidence score for neutrality.
+
+         - 'dominant_race' (str): The dominant race in the detected face.
+             Possible values include "indian", "asian", "latino hispanic",
+             "black", "middle eastern" and "white".
+
+         - 'race' (dict): Confidence scores for each race category.
+             - 'indian': Confidence score for Indian ethnicity.
+             - 'asian': Confidence score for Asian ethnicity.
+             - 'latino hispanic': Confidence score for Latino/Hispanic ethnicity.
+             - 'black': Confidence score for Black ethnicity.
+             - 'middle eastern': Confidence score for Middle Eastern ethnicity.
+             - 'white': Confidence score for White ethnicity.
+     """
+     return demography.analyze(
+         img_path=img_path,
+         actions=actions,
+         enforce_detection=enforce_detection,
+         detector_backend=detector_backend,
+         align=align,
+         expand_percentage=expand_percentage,
+         silent=silent,
+     )
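A usage sketch for analyze with a placeholder path; each detected face yields one dictionary in the returned list:

from deepface import DeepFace

objs = DeepFace.analyze(img_path="img.jpg", actions=("age", "gender"))
for obj in objs:
    print(obj["age"], obj["dominant_gender"], obj["region"])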
+
+
+ def find(
+     img_path: Union[str, np.ndarray],
+     db_path: str,
+     model_name: str = "VGG-Face",
+     distance_metric: str = "cosine",
+     enforce_detection: bool = True,
+     detector_backend: str = "opencv",
+     align: bool = True,
+     expand_percentage: int = 0,
+     threshold: Optional[float] = None,
+     normalization: str = "base",
+     silent: bool = False,
+ ) -> List[pd.DataFrame]:
+     """
+     Identify individuals in a database.
+     Args:
+         img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
+             or a base64 encoded image. If the source image contains multiple faces, the result
+             will include information for each detected face.
+
+         db_path (str): Path to the folder containing image files. All detected faces
+             in the database will be considered in the decision-making process.
+
+         model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
+             OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
+
+         distance_metric (str): Metric for measuring similarity. Options: 'cosine',
+             'euclidean', 'euclidean_l2' (default is cosine).
+
+         enforce_detection (bool): If no face is detected in an image, raise an exception.
+             Set to False to avoid the exception for low-resolution images (default is True).
+
+         detector_backend (str): face detector backend. Options: 'opencv', 'retinaface',
+             'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
+             (default is opencv).
+
+         align (bool): Perform alignment based on the eye positions (default is True).
+
+         expand_percentage (int): expand the detected facial area by a percentage (default is 0).
+
+         threshold (float): Specify a threshold to determine whether a pair represents the same
+             person or different individuals. This threshold is used for comparing distances.
+             If left unset, default pre-tuned threshold values will be applied based on the
+             specified model name and distance metric (default is None).
+
+         normalization (str): Normalize the input image before feeding it to the model.
+             Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace (default is base).
+
+         silent (bool): Suppress or allow some log messages for a quieter analysis process
+             (default is False).
+
+     Returns:
+         results (List[pd.DataFrame]): A list of pandas dataframes. Each dataframe corresponds
+             to the identity information for an individual detected in the source image.
+             The DataFrame columns include:
+
+         - 'identity': Identity label of the detected individual.
+
+         - 'target_x', 'target_y', 'target_w', 'target_h': Bounding box coordinates of the
+             target face in the database.
+
+         - 'source_x', 'source_y', 'source_w', 'source_h': Bounding box coordinates of the
+             detected face in the source image.
+
+         - 'threshold': threshold used to decide whether the pair depicts the same person
+             or different persons.
+
+         - 'distance': Similarity score between the faces based on the
+             specified model and distance metric.
+     """
+     return recognition.find(
+         img_path=img_path,
+         db_path=db_path,
+         model_name=model_name,
+         distance_metric=distance_metric,
+         enforce_detection=enforce_detection,
+         detector_backend=detector_backend,
+         align=align,
+         expand_percentage=expand_percentage,
+         threshold=threshold,
+         normalization=normalization,
+         silent=silent,
+     )
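A sketch of find against the columns documented above; the query image and database folder are placeholders:

from deepface import DeepFace

dfs = DeepFace.find(img_path="query.jpg", db_path="face_db")
if len(dfs) > 0 and not dfs[0].empty:
    # rows are candidate identities for the first detected face
    print(dfs[0][["identity", "distance", "threshold"]].head())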
+
+
+ def represent(
+     img_path: Union[str, np.ndarray],
+     model_name: str = "VGG-Face",
+     enforce_detection: bool = True,
+     detector_backend: str = "opencv",
+     align: bool = True,
+     expand_percentage: int = 0,
+     normalization: str = "base",
+ ) -> List[Dict[str, Any]]:
+     """
+     Represent facial images as multi-dimensional vector embeddings.
+
+     Args:
+         img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
+             or a base64 encoded image. If the source image contains multiple faces, the result
+             will include information for each detected face.
+
+         model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
+             OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet
+             (default is VGG-Face).
+
+         enforce_detection (bool): If no face is detected in an image, raise an exception.
+             Set to False to avoid the exception for low-resolution images (default is True).
+
+         detector_backend (str): face detector backend. Options: 'opencv', 'retinaface',
+             'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
+             (default is opencv).
+
+         align (bool): Perform alignment based on the eye positions (default is True).
+
+         expand_percentage (int): expand the detected facial area by a percentage (default is 0).
+
+         normalization (str): Normalize the input image before feeding it to the model.
+             Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace
+             (default is base).
+
+     Returns:
+         results (List[Dict[str, Any]]): A list of dictionaries, each containing the
+             following fields:
+
+         - embedding (List[float]): Multidimensional vector representing facial features.
+             The number of dimensions varies based on the reference model
+             (e.g., FaceNet returns 128 dimensions, VGG-Face returns 4096 dimensions).
+
+         - facial_area (dict): Detected facial area in dictionary format.
+             Contains 'x' and 'y' as the top-left corner point, and 'w' and 'h'
+             as the width and height. If `detector_backend` is set to 'skip', it represents
+             the full image area and is not meaningful.
+
+         - face_confidence (float): Confidence score of face detection. If `detector_backend`
+             is set to 'skip', the confidence will be 0 and is not meaningful.
+     """
+     return representation.represent(
+         img_path=img_path,
+         model_name=model_name,
+         enforce_detection=enforce_detection,
+         detector_backend=detector_backend,
+         align=align,
+         expand_percentage=expand_percentage,
+         normalization=normalization,
+     )
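A sketch of represent; per the docstring the embedding length depends on the chosen model:

from deepface import DeepFace

embedding_objs = DeepFace.represent(img_path="img.jpg", model_name="Facenet")
embedding = embedding_objs[0]["embedding"]
print(len(embedding))  # 128 for Facenet per the docstring above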
+
+
+ def stream(
+     db_path: str = "",
+     model_name: str = "VGG-Face",
+     detector_backend: str = "opencv",
+     distance_metric: str = "cosine",
+     enable_face_analysis: bool = True,
+     source: Any = 0,
+     time_threshold: int = 5,
+     frame_threshold: int = 5,
+ ) -> None:
+     """
+     Run real-time face recognition and facial attribute analysis.
+
+     Args:
+         db_path (str): Path to the folder containing image files. All detected faces
+             in the database will be considered in the decision-making process.
+
+         model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
+             OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
+
+         detector_backend (str): face detector backend. Options: 'opencv', 'retinaface',
+             'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
+             (default is opencv).
+
+         distance_metric (str): Metric for measuring similarity. Options: 'cosine',
+             'euclidean', 'euclidean_l2' (default is cosine).
+
+         enable_face_analysis (bool): Flag to enable face analysis (default is True).
+
+         source (Any): The source for the video stream (default is 0, which represents the
+             default camera).
+
+         time_threshold (int): The time threshold (in seconds) for face recognition (default is 5).
+
+         frame_threshold (int): The frame threshold for face recognition (default is 5).
+     Returns:
+         None
+     """
+
+     time_threshold = max(time_threshold, 1)
+     frame_threshold = max(frame_threshold, 1)
+
+     streaming.analysis(
+         db_path=db_path,
+         model_name=model_name,
+         detector_backend=detector_backend,
+         distance_metric=distance_metric,
+         enable_face_analysis=enable_face_analysis,
+         source=source,
+         time_threshold=time_threshold,
+         frame_threshold=frame_threshold,
+     )
+
+
+ def extract_faces(
+     img_path: Union[str, np.ndarray],
+     detector_backend: str = "opencv",
+     enforce_detection: bool = True,
+     align: bool = True,
+     expand_percentage: int = 0,
+     grayscale: bool = False,
+ ) -> List[Dict[str, Any]]:
+     """
+     Extract faces from a given image.
+
+     Args:
+         img_path (str or np.ndarray): Path to the image. Accepts an exact image path
+             as a string, a numpy array (BGR), or a base64 encoded image.
+
+         detector_backend (str): face detector backend. Options: 'opencv', 'retinaface',
+             'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
+             (default is opencv).
+
+         enforce_detection (bool): If no face is detected in an image, raise an exception.
+             Set to False to avoid the exception for low-resolution images (default is True).
+
+         align (bool): Flag to enable face alignment (default is True).
+
+         expand_percentage (int): expand the detected facial area by a percentage (default is 0).
+
+         grayscale (bool): Flag to convert the image to grayscale before
+             processing (default is False).
+
+     Returns:
+         results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary contains:
+
+         - "face" (np.ndarray): The detected face as a NumPy array.
+
+         - "facial_area" (Dict[str, Any]): The detected face's regions as a dictionary containing:
+             - keys 'x', 'y', 'w', 'h' with int values.
+             - keys 'left_eye', 'right_eye' with a tuple of two ints as values. The left and
+                 right eyes are defined with respect to the person, not the observer.
+
+         - "confidence" (float): The confidence score associated with the detected face.
+     """
+
+     return detection.extract_faces(
+         img_path=img_path,
+         detector_backend=detector_backend,
+         enforce_detection=enforce_detection,
+         align=align,
+         expand_percentage=expand_percentage,
+         grayscale=grayscale,
+     )
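A sketch of extract_faces with a placeholder path; each entry carries the cropped face plus its region and confidence:

from deepface import DeepFace

face_objs = DeepFace.extract_faces(img_path="img.jpg", detector_backend="opencv")
for face_obj in face_objs:
    area = face_obj["facial_area"]
    print(face_obj["confidence"], area["x"], area["y"], area["w"], area["h"])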
+
+
+ def cli() -> None:
+     """
+     Command line interface entry point.
+     """
+     import fire
+
+     fire.Fire()
+
+
+ # deprecated function(s)
+
+
+ def detectFace(
+     img_path: Union[str, np.ndarray],
+     target_size: tuple = (224, 224),
+     detector_backend: str = "opencv",
+     enforce_detection: bool = True,
+     align: bool = True,
+ ) -> Union[np.ndarray, None]:
+     """
+     Deprecated face detection function. Use extract_faces for the same functionality.
+
+     Args:
+         img_path (str or np.ndarray): Path to the image. Accepts an exact image path
+             as a string, a numpy array (BGR), or a base64 encoded image.
+
+         target_size (tuple): final shape of the facial image. Black pixels will be
+             added to resize the image (default is (224, 224)).
+
+         detector_backend (str): face detector backend. Options: 'opencv', 'retinaface',
+             'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
+             (default is opencv).
+
+         enforce_detection (bool): If no face is detected in an image, raise an exception.
+             Set to False to avoid the exception for low-resolution images (default is True).
+
+         align (bool): Flag to enable face alignment (default is True).
+
+     Returns:
+         img (np.ndarray): detected (and aligned) facial area image as a numpy array
+     """
+     logger.warn("Function detectFace is deprecated. Use extract_faces instead.")
+     face_objs = extract_faces(
+         img_path=img_path,
+         detector_backend=detector_backend,
+         enforce_detection=enforce_detection,
+         align=align,
+         grayscale=False,
+     )
+     extracted_face = None
+     if len(face_objs) > 0:
+         extracted_face = face_objs[0]["face"]
+         extracted_face = preprocessing.resize_image(img=extracted_face, target_size=target_size)
+     return extracted_face
+
+
+ def sync_datasets():
+     # Set the local directories
+     base_dir = os_path.get_main_directory()
+
+     missing_dir = os.path.join(base_dir, 'mafqoud', 'images', 'missing_people')
+     founded_dir = os.path.join(base_dir, 'mafqoud', 'images', 'founded_people')
+
+     # Ensure the directories exist
+     os.makedirs(missing_dir, exist_ok=True)
+     os.makedirs(founded_dir, exist_ok=True)
+
+     missing_people = cloudservice.sync_folder('missing_people', missing_dir)
+     founded_people = cloudservice.sync_folder('founded_people', founded_dir)
+
+
+ def delete_pkls():
+     # Set the local directories
+     base_dir = os_path.get_main_directory()
+
+     missing_dir = os.path.join(base_dir, 'mafqoud', 'images', 'missing_people')
+     founded_dir = os.path.join(base_dir, 'mafqoud', 'images', 'founded_people')
+
+     # Ensure the directories exist
+     os.makedirs(missing_dir, exist_ok=True)
+     os.makedirs(founded_dir, exist_ok=True)
+
+     cloudservice.delete_pkl_files(missing_dir)
+     cloudservice.delete_pkl_files(founded_dir)
+
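These two helpers are project-specific additions in this commit, relying on the cloudservice module and the mafqoud folder layout above. A hedged sketch of the intended call order: refresh the local copies first, then drop stale .pkl representation caches so find rebuilds them:

from deepface import DeepFace

DeepFace.sync_datasets()  # pull missing_people / founded_people images locally
DeepFace.delete_pkls()    # invalidate cached representations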
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2019 Sefik Ilkin Serengil
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
Makefile ADDED
@@ -0,0 +1,8 @@
+ test:
+ 	cd tests && python -m pytest . -s --disable-warnings
+
+ lint:
+ 	python -m pylint deepface/ --fail-under=10
+
+ coverage:
+ 	pip install pytest-cov && cd tests && python -m pytest --cov=deepface
Train.py ADDED
@@ -0,0 +1,55 @@
+
+ # from deepface import DeepFace
+ # import os
+ # models = [
+ #     "VGG-Face",
+ #     "Facenet",
+ #     "Facenet512",
+ #     "OpenFace",
+ #     "DeepFace",
+ #     "DeepID",
+ #     "ArcFace",
+ #     "Dlib",
+ #     "SFace",
+ # ]
+
+ # metrics = ["cosine", "euclidean", "euclidean_l2"]
+
+ # backends = [
+ #     'opencv',
+ #     'ssd',
+ #     'dlib',
+ #     'mtcnn',
+ #     'retinaface',
+ #     'mediapipe',
+ #     'yolov8',
+ #     'yunet',
+ #     'fastmtcnn',
+ # ]
+
+ # # df = DeepFace.find(img_path='F:/projects/python/mafqoud/dataset/missing_people/m0.jpg'
+ # #                    , db_path='F:/projects/python/mafqoud/dataset/founded_people'
+ # #                    , enforce_detection = True
+ # #                    , model_name = models[2]
+ # #                    , distance_metric = metrics[2]
+ # #                    , detector_backend = backends[3])
+
+ # DeepFace.stream(db_path = "F:/deepface")
+
+ # base_dir = os.path.abspath(os.path.dirname(__file__))
+ # # base_dir = "f:\\"
+ # founded_dir = os.path.join(base_dir, 'mafqoud', 'images', 'founded_people')
+ # def get_main_directory():
+ #     path = os.path.abspath(__file__)
+ #     drive, _ = os.path.splitdrive(path)
+ #     if not drive.endswith(os.path.sep):
+ #         drive += os.path.sep
+ #     return drive
+
+ # base_dir = get_main_directory()
+ # missing_dir = os.path.join(base_dir, 'mafqoud', 'images', 'missing_people')
+ # print(missing_dir)
+
+ # print(base_dir)
+ # print(missing_dir)
+ # print(founded_dir)
__init__.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.0.90"
__pycache__/DeepFace.cpython-312.pyc ADDED
Binary file (23.7 kB).
__pycache__/__init__.cpython-312.pyc ADDED
Binary file (186 Bytes).
api/__init__.py ADDED
File without changes
api/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (173 Bytes).
api/postman/deepface-api.postman_collection.json ADDED
@@ -0,0 +1,102 @@
+ {
+     "info": {
+         "_postman_id": "4c0b144e-4294-4bdd-8072-bcb326b1fed2",
+         "name": "deepface-api",
+         "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
+     },
+     "item": [
+         {
+             "name": "Represent",
+             "request": {
+                 "method": "POST",
+                 "header": [],
+                 "body": {
+                     "mode": "raw",
+                     "raw": "{\n \"model_name\": \"Facenet\",\n \"img\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\"\n}",
+                     "options": {
+                         "raw": {
+                             "language": "json"
+                         }
+                     }
+                 },
+                 "url": {
+                     "raw": "http://127.0.0.1:5000/represent",
+                     "protocol": "http",
+                     "host": ["127", "0", "0", "1"],
+                     "port": "5000",
+                     "path": ["represent"]
+                 }
+             },
+             "response": []
+         },
+         {
+             "name": "Face verification",
+             "request": {
+                 "method": "POST",
+                 "header": [],
+                 "body": {
+                     "mode": "raw",
+                     "raw": " {\n \t\"img1_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\",\n \"img2_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img2.jpg\",\n \"model_name\": \"Facenet\",\n \"detector_backend\": \"mtcnn\",\n \"distance_metric\": \"euclidean\"\n }",
+                     "options": {
+                         "raw": {
+                             "language": "json"
+                         }
+                     }
+                 },
+                 "url": {
+                     "raw": "http://127.0.0.1:5000/verify",
+                     "protocol": "http",
+                     "host": ["127", "0", "0", "1"],
+                     "port": "5000",
+                     "path": ["verify"]
+                 }
+             },
+             "response": []
+         },
+         {
+             "name": "Face analysis",
+             "request": {
+                 "method": "POST",
+                 "header": [],
+                 "body": {
+                     "mode": "raw",
+                     "raw": "{\n \"img_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/couple.jpg\",\n \"actions\": [\"age\", \"gender\", \"emotion\", \"race\"]\n}",
+                     "options": {
+                         "raw": {
+                             "language": "json"
+                         }
+                     }
+                 },
+                 "url": {
+                     "raw": "http://127.0.0.1:5000/analyze",
+                     "protocol": "http",
+                     "host": ["127", "0", "0", "1"],
+                     "port": "5000",
+                     "path": ["analyze"]
+                 }
+             },
+             "response": []
+         }
+     ]
+ }
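The collection above targets a local server on port 5000. An equivalent hedged sketch using the Python requests library (the image path is a placeholder, as in the collection):

import requests

resp = requests.post(
    "http://127.0.0.1:5000/analyze",
    json={"img_path": "couple.jpg", "actions": ["age", "gender", "emotion", "race"]},
)
print(resp.status_code, resp.json())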
api/src/__init__.py ADDED
File without changes
api/src/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (177 Bytes).
api/src/__pycache__/app.cpython-312.pyc ADDED
Binary file (585 Bytes).
api/src/api.py ADDED
@@ -0,0 +1,10 @@
+ import argparse
+ import app
+ import os
+
+ if __name__ == "__main__":
+     deepface_app = app.create_app()
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "-p",
+         "--port",
+         type=int,
+         # fall back to 5000 so the int() cast does not fail when DEFAULT_PORT is unset
+         default=int(os.getenv("DEFAULT_PORT", 5000)),
+         help="Port of serving api",
+     )
+     args = parser.parse_args()
+     deepface_app.run(host="0.0.0.0", port=args.port)
api/src/app.py ADDED
@@ -0,0 +1,11 @@
+ # 3rd party dependencies
+ from flask import Flask
+ from deepface.api.src.modules.core.routes import blueprint
+
+
+ def create_app():
+     app = Flask(__name__)
+     app.register_blueprint(blueprint)
+     print(app.url_map)
+     return app
+
api/src/modules/__init__.py ADDED
File without changes
api/src/modules/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (185 Bytes).
api/src/modules/core/__init__.py ADDED
File without changes
api/src/modules/core/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (190 Bytes).
api/src/modules/core/__pycache__/routes.cpython-312.pyc ADDED
Binary file (8.62 kB).
api/src/modules/core/__pycache__/service.cpython-312.pyc ADDED
Binary file (3.1 kB).
api/src/modules/core/routes.py ADDED
@@ -0,0 +1,207 @@
+ from flask import Blueprint, request, jsonify
+ from deepface.api.src.modules.core import service
+ from deepface.commons.logger import Logger
+ from deepface.commons.os_path import os_path
+ import json
+ import os
+
+ logger = Logger(module="api/src/routes.py")
+
+ blueprint = Blueprint("routes", __name__)
+
+
+ @blueprint.route("/")
+ def home():
+     return "<h1>Welcome to DeepFace API!</h1>"
+
+
+ @blueprint.route("/represent", methods=["POST"])
+ def represent():
+     input_args = request.get_json()
+
+     if input_args is None:
+         return {"message": "empty input set passed"}
+
+     img_path = input_args.get("img") or input_args.get("img_path")
+     if img_path is None:
+         return {"message": "you must pass img_path input"}
+
+     model_name = input_args.get("model_name", "VGG-Face")
+     detector_backend = input_args.get("detector_backend", "opencv")
+     enforce_detection = input_args.get("enforce_detection", True)
+     align = input_args.get("align", True)
+
+     obj = service.represent(
+         img_path=img_path,
+         model_name=model_name,
+         detector_backend=detector_backend,
+         enforce_detection=enforce_detection,
+         align=align,
+     )
+
+     logger.debug(obj)
+
+     return obj
+
+
+ @blueprint.route("/verify", methods=["POST"])
+ def verify():
+     input_args = request.get_json()
+
+     if input_args is None:
+         return {"message": "empty input set passed"}
+
+     img1_path = input_args.get("img1") or input_args.get("img1_path")
+     img2_path = input_args.get("img2") or input_args.get("img2_path")
+
+     if img1_path is None:
+         return {"message": "you must pass img1_path input"}
+
+     if img2_path is None:
+         return {"message": "you must pass img2_path input"}
+
+     model_name = input_args.get("model_name", "VGG-Face")
+     detector_backend = input_args.get("detector_backend", "opencv")
+     enforce_detection = input_args.get("enforce_detection", True)
+     distance_metric = input_args.get("distance_metric", "cosine")
+     align = input_args.get("align", True)
+
+     verification = service.verify(
+         img1_path=img1_path,
+         img2_path=img2_path,
+         model_name=model_name,
+         detector_backend=detector_backend,
+         distance_metric=distance_metric,
+         align=align,
+         enforce_detection=enforce_detection,
+     )
+
+     logger.debug(verification)
+
+     return verification
+
+
+ @blueprint.route("/analyze", methods=["POST"])
+ def analyze():
+     input_args = request.get_json()
+
+     if input_args is None:
+         return {"message": "empty input set passed"}
+
+     img_path = input_args.get("img") or input_args.get("img_path")
+     if img_path is None:
+         return {"message": "you must pass img_path input"}
+
+     detector_backend = input_args.get("detector_backend", "opencv")
+     enforce_detection = input_args.get("enforce_detection", True)
+     align = input_args.get("align", True)
+     actions = input_args.get("actions", ["age", "gender", "emotion", "race"])
+
+     demographies = service.analyze(
+         img_path=img_path,
+         actions=actions,
+         detector_backend=detector_backend,
+         enforce_detection=enforce_detection,
+         align=align,
+     )
+
+     logger.debug(demographies)
+
+     return demographies
+
+
+ @blueprint.route("/find", methods=["POST"])
+ def find():
+     input_args = request.get_json()
+
+     if input_args is None:
+         response = jsonify({'error': 'empty input set passed'})
+         response.status_code = 500
+         return response
+
+     img_name = input_args.get("img") or input_args.get("img_name")
+     img_type = input_args.get("img_type")
+
+     if img_name is None:
+         response = jsonify({'error': 'you must pass img_name input'})
+         response.status_code = 404
+         return response
+
+     missing_aliases = ("missing", "missing_person", "missing_people", "missing person", "missing people")
+     founded_aliases = ("founded", "founded_person", "founded_people", "founded person", "founded people")
+
+     if img_type in missing_aliases:
+         img_path = os.path.join(os_path.get_main_directory(), 'mafqoud', 'images', 'missing_people', img_name)
+         db_path = os.path.join(os_path.get_main_directory(), 'mafqoud', 'images', 'founded_people')
+     elif img_type in founded_aliases:
+         img_path = os.path.join(os_path.get_main_directory(), 'mafqoud', 'images', 'founded_people', img_name)
+         db_path = os.path.join(os_path.get_main_directory(), 'mafqoud', 'images', 'missing_people')
+     else:
+         response = jsonify({
+             'error': 'the type of the image is not correct; it should be one of '
+                      '( missing, missing_person, missing_people, missing person, missing people ) '
+                      'or ( founded, founded_person, founded_people, founded person, founded people )'
+         })
+         response.status_code = 400
+         return response
+
+     print(img_path)
+     if not os.path.exists(img_path) or not os.path.isfile(img_path):
+         # if the image does not exist, return a JSON response with status code 404
+         response = jsonify({'error': 'Image not found'})
+         response.status_code = 404
+         return response
+
+     model_name = input_args.get("model_name", "Facenet512")
+     detector_backend = input_args.get("detector_backend", "mtcnn")
+     enforce_detection = input_args.get("enforce_detection", True)
+     distance_metric = input_args.get("distance_metric", "euclidean_l2")
+     align = input_args.get("align", True)
+
+     # img_path and db_path are fully resolved by the if/elif/else above,
+     # so no further fallback lookup is needed here
+
+     results = service.find(
+         img_path=img_path,
+         db_path=db_path,
+         model_name=model_name,
+         detector_backend=detector_backend,
+         distance_metric=distance_metric,
+         align=align,
+         enforce_detection=enforce_detection,
+     )
+
+     # calculate similarity_percentage for each row
+     results[0]['similarity_percentage'] = 100 - ((results[0]['distance'] / results[0]['threshold']) * 100)
+
+     data = []
+     for _, row in results[0].iterrows():
+         data.append({
+             "identity": row['identity'],
+             "similarity_percentage": row['similarity_percentage']
+         })
+
+     json_data = json.dumps(data, indent=4)
+
+     logger.debug(json_data)
+     return json_data
+
+
+ @blueprint.route("/dataset/sync", methods=["GET"])
+ def sync_datasets():
+     result = service.sync_datasets()
+     return jsonify(result)
+
+
+ @blueprint.route("/delete/pkls", methods=["GET"])
+ def delete_pkls():
+     result = service.delete_pkls()
+     return jsonify(result)
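Unlike the stock endpoints, the new /find route takes an image name plus a type selector rather than full paths. A hedged request sketch (img_name is a hypothetical file that must already exist under the mafqoud images folder):

import requests

resp = requests.post(
    "http://127.0.0.1:5000/find",
    json={"img_name": "m0.jpg", "img_type": "missing"},
)
# on success: a JSON list of {"identity", "similarity_percentage"} entries
print(resp.status_code, resp.text)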
api/src/modules/core/service.py ADDED
@@ -0,0 +1,84 @@
+ from deepface import DeepFace
+
+ # pylint: disable=broad-except
+
+
+ def represent(img_path, model_name, detector_backend, enforce_detection, align):
+     try:
+         result = {}
+         embedding_objs = DeepFace.represent(
+             img_path=img_path,
+             model_name=model_name,
+             detector_backend=detector_backend,
+             enforce_detection=enforce_detection,
+             align=align,
+         )
+         result["results"] = embedding_objs
+         return result
+     except Exception as err:
+         return {"error": f"Exception while representing: {str(err)}"}, 400
+
+
+ def verify(
+     img1_path, img2_path, model_name, detector_backend, distance_metric, enforce_detection, align
+ ):
+     try:
+         obj = DeepFace.verify(
+             img1_path=img1_path,
+             img2_path=img2_path,
+             model_name=model_name,
+             detector_backend=detector_backend,
+             distance_metric=distance_metric,
+             align=align,
+             enforce_detection=enforce_detection,
+         )
+         return obj
+     except Exception as err:
+         return {"error": f"Exception while verifying: {str(err)}"}, 400
+
+
+ def analyze(img_path, actions, detector_backend, enforce_detection, align):
+     try:
+         result = {}
+         demographies = DeepFace.analyze(
+             img_path=img_path,
+             actions=actions,
+             detector_backend=detector_backend,
+             enforce_detection=enforce_detection,
+             align=align,
+             silent=True,
+         )
+         result["results"] = demographies
+         return result
+     except Exception as err:
+         return {"error": f"Exception while analyzing: {str(err)}"}, 400
+
+
+ def find(img_path, db_path, model_name, detector_backend, distance_metric, enforce_detection, align):
+     try:
+         obj = DeepFace.find(
+             img_path=img_path,
+             db_path=db_path,
+             model_name=model_name,
+             detector_backend=detector_backend,
+             distance_metric=distance_metric,
+             align=align,
+             enforce_detection=enforce_detection,
+         )
+         return obj
+     except Exception as err:
+         return {"error": f"Exception while finding: {str(err)}"}, 400
+
+
+ def sync_datasets():
+     try:
+         DeepFace.sync_datasets()
+         return {'data': 'synced successfully'}, 200
+     except Exception as e:
+         return {'error': str(e)}, 400
+
+
+ def delete_pkls():
+     try:
+         DeepFace.delete_pkls()
+         return {'data': 'pkl files deleted successfully'}, 200
+     except Exception as e:
+         return {'error': str(e)}, 400
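Note the error-path convention used throughout this service layer: returning a (dict, status_code) tuple, which Flask turns into a JSON body with that HTTP status. A minimal standalone illustration of the same pattern:

from flask import Flask

app = Flask(__name__)

@app.route("/demo")
def demo():
    try:
        raise ValueError("something went wrong")
    except Exception as err:
        # Flask serializes the dict and applies the tuple's status code
        return {"error": str(err)}, 400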
basemodels/ArcFace.py ADDED
@@ -0,0 +1,179 @@
+ import os
+ import gdown
+ from deepface.commons import package_utils, folder_utils
+ from deepface.models.FacialRecognition import FacialRecognition
+
+ from deepface.commons import logger as log
+
+ logger = log.get_singletonish_logger()
+
+ # pylint: disable=unsubscriptable-object
+
+ # --------------------------------
+ # dependency configuration
+
+ tf_version = package_utils.get_tf_major_version()
+
+ if tf_version == 1:
+     from keras.models import Model
+     from keras.engine import training
+     from keras.layers import (
+         ZeroPadding2D,
+         Input,
+         Conv2D,
+         BatchNormalization,
+         PReLU,
+         Add,
+         Dropout,
+         Flatten,
+         Dense,
+     )
+ else:
+     from tensorflow.keras.models import Model
+     from tensorflow.python.keras.engine import training
+     from tensorflow.keras.layers import (
+         ZeroPadding2D,
+         Input,
+         Conv2D,
+         BatchNormalization,
+         PReLU,
+         Add,
+         Dropout,
+         Flatten,
+         Dense,
+     )
+
+
+ # pylint: disable=too-few-public-methods
+ class ArcFaceClient(FacialRecognition):
+     """
+     ArcFace model class
+     """
+
+     def __init__(self):
+         self.model = load_model()
+         self.model_name = "ArcFace"
+         self.input_shape = (112, 112)
+         self.output_shape = 512
+
+
+ def load_model(
+     url="https://github.com/serengil/deepface_models/releases/download/v1.0/arcface_weights.h5",
+ ) -> Model:
+     """
+     Construct the ArcFace model, download its weights and load them.
+     Returns:
+         model (Model)
+     """
+     base_model = ResNet34()
+     inputs = base_model.inputs[0]
+     arcface_model = base_model.outputs[0]
+     arcface_model = BatchNormalization(momentum=0.9, epsilon=2e-5)(arcface_model)
+     arcface_model = Dropout(0.4)(arcface_model)
+     arcface_model = Flatten()(arcface_model)
+     arcface_model = Dense(512, activation=None, use_bias=True, kernel_initializer="glorot_normal")(
+         arcface_model
+     )
+     embedding = BatchNormalization(momentum=0.9, epsilon=2e-5, name="embedding", scale=True)(
+         arcface_model
+     )
+     model = Model(inputs, embedding, name=base_model.name)
+
+     # ---------------------------------------
+     # check the availability of pre-trained weights
+
+     home = folder_utils.get_deepface_home()
+
+     file_name = "arcface_weights.h5"
+     output = home + "/.deepface/weights/" + file_name
+
+     if not os.path.isfile(output):
+         logger.info(f"{file_name} will be downloaded to {output}")
+         gdown.download(url, output, quiet=False)
+
+     # ---------------------------------------
+
+     model.load_weights(output)
+
+     return model
+
+
+ def ResNet34() -> Model:
+     """
+     ResNet34 model
+     Returns:
+         model (Model)
+     """
+     img_input = Input(shape=(112, 112, 3))
+
+     x = ZeroPadding2D(padding=1, name="conv1_pad")(img_input)
+     x = Conv2D(
+         64, 3, strides=1, use_bias=False, kernel_initializer="glorot_normal", name="conv1_conv"
+     )(x)
+     x = BatchNormalization(axis=3, epsilon=2e-5, momentum=0.9, name="conv1_bn")(x)
+     x = PReLU(shared_axes=[1, 2], name="conv1_prelu")(x)
+     x = stack_fn(x)
+
+     model = training.Model(img_input, x, name="ResNet34")
+
+     return model
+
+
+ def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
+     bn_axis = 3
+
+     if conv_shortcut:
+         shortcut = Conv2D(
+             filters,
+             1,
+             strides=stride,
+             use_bias=False,
+             kernel_initializer="glorot_normal",
+             name=name + "_0_conv",
+         )(x)
+         shortcut = BatchNormalization(
+             axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_0_bn"
+         )(shortcut)
+     else:
+         shortcut = x
+
+     x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_1_bn")(x)
+     x = ZeroPadding2D(padding=1, name=name + "_1_pad")(x)
+     x = Conv2D(
+         filters,
+         3,
+         strides=1,
+         kernel_initializer="glorot_normal",
+         use_bias=False,
+         name=name + "_1_conv",
+     )(x)
+     x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_2_bn")(x)
+     x = PReLU(shared_axes=[1, 2], name=name + "_1_prelu")(x)
+
+     x = ZeroPadding2D(padding=1, name=name + "_2_pad")(x)
+     x = Conv2D(
+         filters,
+         kernel_size,
+         strides=stride,
+         kernel_initializer="glorot_normal",
+         use_bias=False,
+         name=name + "_2_conv",
+     )(x)
+     x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_3_bn")(x)
+
+     x = Add(name=name + "_add")([shortcut, x])
+     return x
+
+
+ def stack1(x, filters, blocks, stride1=2, name=None):
+     x = block1(x, filters, stride=stride1, name=name + "_block1")
+     for i in range(2, blocks + 1):
+         x = block1(x, filters, conv_shortcut=False, name=name + "_block" + str(i))
+     return x
+
+
+ def stack_fn(x):
+     x = stack1(x, 64, 3, name="conv2")
+     x = stack1(x, 128, 4, name="conv3")
+     x = stack1(x, 256, 6, name="conv4")
+     return stack1(x, 512, 3, name="conv5")
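A hedged sketch of exercising this module directly, assuming it is importable as deepface.basemodels.ArcFace as in upstream deepface; weights download on the first call:

import numpy as np
from deepface.basemodels import ArcFace  # assumed import path

client = ArcFace.ArcFaceClient()
# batch of one 112x112x3 input, matching input_shape above
dummy = np.random.rand(1, 112, 112, 3).astype("float32")
embedding = client.model.predict(dummy)
print(embedding.shape)  # expected (1, 512) given output_shape above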
basemodels/DeepID.py ADDED
@@ -0,0 +1,99 @@
+ import os
+ import gdown
+ from deepface.commons import package_utils, folder_utils
+ from deepface.models.FacialRecognition import FacialRecognition
+ from deepface.commons import logger as log
+
+ logger = log.get_singletonish_logger()
+
+ tf_version = package_utils.get_tf_major_version()
+
+ if tf_version == 1:
+     from keras.models import Model
+     from keras.layers import (
+         Conv2D,
+         Activation,
+         Input,
+         Add,
+         MaxPooling2D,
+         Flatten,
+         Dense,
+         Dropout,
+     )
+ else:
+     from tensorflow.keras.models import Model
+     from tensorflow.keras.layers import (
+         Conv2D,
+         Activation,
+         Input,
+         Add,
+         MaxPooling2D,
+         Flatten,
+         Dense,
+         Dropout,
+     )
+
+ # pylint: disable=line-too-long
+
+
+ # -------------------------------------
+
+ # pylint: disable=too-few-public-methods
+ class DeepIdClient(FacialRecognition):
+     """
+     DeepId model class
+     """
+
+     def __init__(self):
+         self.model = load_model()
+         self.model_name = "DeepId"
+         self.input_shape = (47, 55)
+         self.output_shape = 160
+
+
+ def load_model(
+     url="https://github.com/serengil/deepface_models/releases/download/v1.0/deepid_keras_weights.h5",
+ ) -> Model:
+     """
+     Construct the DeepId model, download its weights and load them.
+     """
+
+     myInput = Input(shape=(55, 47, 3))
+
+     x = Conv2D(20, (4, 4), name="Conv1", activation="relu", input_shape=(55, 47, 3))(myInput)
+     x = MaxPooling2D(pool_size=2, strides=2, name="Pool1")(x)
+     x = Dropout(rate=0.99, name="D1")(x)
+
+     x = Conv2D(40, (3, 3), name="Conv2", activation="relu")(x)
+     x = MaxPooling2D(pool_size=2, strides=2, name="Pool2")(x)
+     x = Dropout(rate=0.99, name="D2")(x)
+
+     x = Conv2D(60, (3, 3), name="Conv3", activation="relu")(x)
+     x = MaxPooling2D(pool_size=2, strides=2, name="Pool3")(x)
+     x = Dropout(rate=0.99, name="D3")(x)
+
+     x1 = Flatten()(x)
+     fc11 = Dense(160, name="fc11")(x1)
+
+     x2 = Conv2D(80, (2, 2), name="Conv4", activation="relu")(x)
+     x2 = Flatten()(x2)
+     fc12 = Dense(160, name="fc12")(x2)
+
+     y = Add()([fc11, fc12])
+     y = Activation("relu", name="deepid")(y)
+
+     model = Model(inputs=[myInput], outputs=y)
+
+     # ---------------------------------
+
+     home = folder_utils.get_deepface_home()
+
+     if not os.path.isfile(home + "/.deepface/weights/deepid_keras_weights.h5"):
+         logger.info("deepid_keras_weights.h5 will be downloaded...")
+
+         output = home + "/.deepface/weights/deepid_keras_weights.h5"
+         gdown.download(url, output, quiet=False)
+
+     model.load_weights(home + "/.deepface/weights/deepid_keras_weights.h5")
+
+     return model
basemodels/Dlib.py ADDED
@@ -0,0 +1,89 @@
+ from typing import List
+ import os
+ import bz2
+ import gdown
+ import numpy as np
+ from deepface.commons import folder_utils
+ from deepface.models.FacialRecognition import FacialRecognition
+ from deepface.commons import logger as log
+
+ logger = log.get_singletonish_logger()
+
+ # pylint: disable=too-few-public-methods
+
+
+ class DlibClient(FacialRecognition):
+     """
+     Dlib model class
+     """
+
+     def __init__(self):
+         self.model = DlibResNet()
+         self.model_name = "Dlib"
+         self.input_shape = (150, 150)
+         self.output_shape = 128
+
+     def forward(self, img: np.ndarray) -> List[float]:
+         """
+         Find embeddings with the Dlib model.
+         This model necessitates overriding the forward method
+         because it is not a keras model.
+         Args:
+             img (np.ndarray): pre-loaded image in BGR
+         Returns:
+             embeddings (list): multi-dimensional vector
+         """
+         # extract_faces returns 4-dimensional images
+         if len(img.shape) == 4:
+             img = img[0]
+
+         # bgr to rgb
+         img = img[:, :, ::-1]
+
+         # img is in scale of [0, 1] but expected [0, 255]
+         if img.max() <= 1:
+             img = img * 255
+
+         img = img.astype(np.uint8)
+
+         img_representation = self.model.model.compute_face_descriptor(img)
+         img_representation = np.array(img_representation)
+         img_representation = np.expand_dims(img_representation, axis=0)
+         return img_representation[0].tolist()
+
+
+ class DlibResNet:
+     def __init__(self):
+
+         # dlib is an optional dependency; do not import it at the global level
+         try:
+             import dlib
+         except ModuleNotFoundError as e:
+             raise ImportError(
+                 "Dlib is an optional dependency, ensure the library is installed. "
+                 "Please install using 'pip install dlib'"
+             ) from e
+
+         home = folder_utils.get_deepface_home()
+         weight_file = home + "/.deepface/weights/dlib_face_recognition_resnet_model_v1.dat"
+
+         # download the pre-trained model if it does not exist
+         if not os.path.isfile(weight_file):
+             logger.info("dlib_face_recognition_resnet_model_v1.dat is going to be downloaded")
+
+             file_name = "dlib_face_recognition_resnet_model_v1.dat.bz2"
+             url = f"http://dlib.net/files/{file_name}"
+             output = f"{home}/.deepface/weights/{file_name}"
+             gdown.download(url, output, quiet=False)
+
+             zipfile = bz2.BZ2File(output)
+             data = zipfile.read()
+             newfilepath = output[:-4]  # discard .bz2 extension
+             with open(newfilepath, "wb") as f:
+                 f.write(data)
+
+         self.model = dlib.face_recognition_model_v1(weight_file)
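A hedged sketch of the Dlib client's forward path (requires the optional dlib dependency; the input mimics what extract_faces produces, a 4-dimensional BGR array scaled to [0, 1]):

import numpy as np
from deepface.basemodels import Dlib  # assumed import path

client = Dlib.DlibClient()
dummy = np.random.rand(1, 150, 150, 3)  # values in [0, 1], as the method expects
embedding = client.forward(dummy)
print(len(embedding))  # expected 128 given output_shape above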
basemodels/Facenet.py ADDED
@@ -0,0 +1,1715 @@
1
+ import os
2
+ import gdown
3
+ from deepface.commons import package_utils, folder_utils
4
+ from deepface.models.FacialRecognition import FacialRecognition
5
+ from deepface.commons import logger as log
6
+
7
+ logger = log.get_singletonish_logger()
8
+
9
+ # --------------------------------
10
+ # dependency configuration
11
+
12
+ tf_version = package_utils.get_tf_major_version()
13
+
14
+ if tf_version == 1:
15
+ from keras.models import Model
16
+ from keras.layers import Activation
17
+ from keras.layers import BatchNormalization
18
+ from keras.layers import Concatenate
19
+ from keras.layers import Conv2D
20
+ from keras.layers import Dense
21
+ from keras.layers import Dropout
22
+ from keras.layers import GlobalAveragePooling2D
23
+ from keras.layers import Input
24
+ from keras.layers import Lambda
25
+ from keras.layers import MaxPooling2D
26
+ from keras.layers import add
27
+ from keras import backend as K
28
+ else:
29
+ from tensorflow.keras.models import Model
30
+ from tensorflow.keras.layers import Activation
31
+ from tensorflow.keras.layers import BatchNormalization
32
+ from tensorflow.keras.layers import Concatenate
33
+ from tensorflow.keras.layers import Conv2D
34
+ from tensorflow.keras.layers import Dense
35
+ from tensorflow.keras.layers import Dropout
36
+ from tensorflow.keras.layers import GlobalAveragePooling2D
37
+ from tensorflow.keras.layers import Input
38
+ from tensorflow.keras.layers import Lambda
39
+ from tensorflow.keras.layers import MaxPooling2D
40
+ from tensorflow.keras.layers import add
41
+ from tensorflow.keras import backend as K
42
+
43
+ # --------------------------------
44
+
45
+ # pylint: disable=too-few-public-methods
46
+ class FaceNet128dClient(FacialRecognition):
47
+ """
48
+ FaceNet-128d model class
49
+ """
50
+
51
+ def __init__(self):
52
+ self.model = load_facenet128d_model()
53
+ self.model_name = "FaceNet-128d"
54
+ self.input_shape = (160, 160)
55
+ self.output_shape = 128
56
+
57
+
58
+ class FaceNet512dClient(FacialRecognition):
59
+ """
60
+ FaceNet-512d model class
61
+ """
62
+
63
+ def __init__(self):
64
+ self.model = load_facenet512d_model()
65
+ self.model_name = "FaceNet-512d"
66
+ self.input_shape = (160, 160)
67
+ self.output_shape = 512
68
+
69
+
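For orientation, a minimal sketch of how these clients are consumed. It assumes the pre-trained weights can be downloaded and that, since the backbones are keras models, embeddings come from the standard predict API:

import numpy as np

client = FaceNet512dClient()

# any aligned 160x160 RGB face scaled to [0, 1]; random data stands in here
face = np.random.rand(1, 160, 160, 3).astype(np.float32)

embedding = client.model.predict(face)[0]
print(embedding.shape)  # (512,); FaceNet128dClient yields (128,) instead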
70
+ def scaling(x, scale):
71
+ return x * scale
72
+
73
+
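The tiny helper above lets the residual branches built below be down-weighted (by 0.17, 0.1, or 0.2 depending on the block type) through a serializable Lambda layer before the skip connection, the residual-scaling trick that stabilizes training of deep Inception-ResNet networks. A self-contained toy sketch of that pattern, assuming tensorflow 2 is installed:

import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, Lambda, Activation, add

def scaling(x, scale):
    return x * scale

inp = Input(shape=(8, 8, 16))
branch = Conv2D(16, 1, padding="same")(inp)  # stand-in for an Inception branch
branch = Lambda(scaling, arguments={"scale": 0.17})(branch)  # scale the residual
out = Activation("relu")(add([inp, branch]))  # add back to the skip connection
toy = Model(inp, out)
print(toy.predict(np.zeros((1, 8, 8, 16), dtype="float32")).shape)  # (1, 8, 8, 16)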
74
+ def InceptionResNetV1(dimension: int = 128) -> Model:
75
+ """
76
+ InceptionResNetV1 model, heavily inspired by
77
+ github.com/davidsandberg/facenet/blob/master/src/models/inception_resnet_v1.py
78
+ As mentioned in the readme of Sandberg's repo, the pre-trained models use Inception ResNet v1.
79
+ The training process is also documented at
80
+ sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/
81
+
82
+ Args:
83
+ dimension (int): number of dimensions in the embedding layer
84
+ Returns:
85
+ model (Model)
86
+ """
87
+
88
+ inputs = Input(shape=(160, 160, 3))
89
+ x = Conv2D(32, 3, strides=2, padding="valid", use_bias=False, name="Conv2d_1a_3x3")(inputs)
90
+ x = BatchNormalization(
91
+ axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_1a_3x3_BatchNorm"
92
+ )(x)
93
+ x = Activation("relu", name="Conv2d_1a_3x3_Activation")(x)
94
+ x = Conv2D(32, 3, strides=1, padding="valid", use_bias=False, name="Conv2d_2a_3x3")(x)
95
+ x = BatchNormalization(
96
+ axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_2a_3x3_BatchNorm"
97
+ )(x)
98
+ x = Activation("relu", name="Conv2d_2a_3x3_Activation")(x)
99
+ x = Conv2D(64, 3, strides=1, padding="same", use_bias=False, name="Conv2d_2b_3x3")(x)
100
+ x = BatchNormalization(
101
+ axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_2b_3x3_BatchNorm"
102
+ )(x)
103
+ x = Activation("relu", name="Conv2d_2b_3x3_Activation")(x)
104
+ x = MaxPooling2D(3, strides=2, name="MaxPool_3a_3x3")(x)
105
+ x = Conv2D(80, 1, strides=1, padding="valid", use_bias=False, name="Conv2d_3b_1x1")(x)
106
+ x = BatchNormalization(
107
+ axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_3b_1x1_BatchNorm"
108
+ )(x)
109
+ x = Activation("relu", name="Conv2d_3b_1x1_Activation")(x)
110
+ x = Conv2D(192, 3, strides=1, padding="valid", use_bias=False, name="Conv2d_4a_3x3")(x)
111
+ x = BatchNormalization(
112
+ axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_4a_3x3_BatchNorm"
113
+ )(x)
114
+ x = Activation("relu", name="Conv2d_4a_3x3_Activation")(x)
115
+ x = Conv2D(256, 3, strides=2, padding="valid", use_bias=False, name="Conv2d_4b_3x3")(x)
116
+ x = BatchNormalization(
117
+ axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_4b_3x3_BatchNorm"
118
+ )(x)
119
+ x = Activation("relu", name="Conv2d_4b_3x3_Activation")(x)
120
+
121
+ # 5x Block35 (Inception-ResNet-A block):
122
+ branch_0 = Conv2D(
123
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_0_Conv2d_1x1"
124
+ )(x)
125
+ branch_0 = BatchNormalization(
126
+ axis=3,
127
+ momentum=0.995,
128
+ epsilon=0.001,
129
+ scale=False,
130
+ name="Block35_1_Branch_0_Conv2d_1x1_BatchNorm",
131
+ )(branch_0)
132
+ branch_0 = Activation("relu", name="Block35_1_Branch_0_Conv2d_1x1_Activation")(branch_0)
133
+ branch_1 = Conv2D(
134
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_1_Conv2d_0a_1x1"
135
+ )(x)
136
+ branch_1 = BatchNormalization(
137
+ axis=3,
138
+ momentum=0.995,
139
+ epsilon=0.001,
140
+ scale=False,
141
+ name="Block35_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
142
+ )(branch_1)
143
+ branch_1 = Activation("relu", name="Block35_1_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
144
+ branch_1 = Conv2D(
145
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_1_Conv2d_0b_3x3"
146
+ )(branch_1)
147
+ branch_1 = BatchNormalization(
148
+ axis=3,
149
+ momentum=0.995,
150
+ epsilon=0.001,
151
+ scale=False,
152
+ name="Block35_1_Branch_1_Conv2d_0b_3x3_BatchNorm",
153
+ )(branch_1)
154
+ branch_1 = Activation("relu", name="Block35_1_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
155
+ branch_2 = Conv2D(
156
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_2_Conv2d_0a_1x1"
157
+ )(x)
158
+ branch_2 = BatchNormalization(
159
+ axis=3,
160
+ momentum=0.995,
161
+ epsilon=0.001,
162
+ scale=False,
163
+ name="Block35_1_Branch_2_Conv2d_0a_1x1_BatchNorm",
164
+ )(branch_2)
165
+ branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
166
+ branch_2 = Conv2D(
167
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_2_Conv2d_0b_3x3"
168
+ )(branch_2)
169
+ branch_2 = BatchNormalization(
170
+ axis=3,
171
+ momentum=0.995,
172
+ epsilon=0.001,
173
+ scale=False,
174
+ name="Block35_1_Branch_2_Conv2d_0b_3x3_BatchNorm",
175
+ )(branch_2)
176
+ branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
177
+ branch_2 = Conv2D(
178
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_2_Conv2d_0c_3x3"
179
+ )(branch_2)
180
+ branch_2 = BatchNormalization(
181
+ axis=3,
182
+ momentum=0.995,
183
+ epsilon=0.001,
184
+ scale=False,
185
+ name="Block35_1_Branch_2_Conv2d_0c_3x3_BatchNorm",
186
+ )(branch_2)
187
+ branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
188
+ branches = [branch_0, branch_1, branch_2]
189
+ mixed = Concatenate(axis=3, name="Block35_1_Concatenate")(branches)
190
+ up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_1_Conv2d_1x1")(
191
+ mixed
192
+ )
193
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
194
+ x = add([x, up])
195
+ x = Activation("relu", name="Block35_1_Activation")(x)
196
+
197
+ branch_0 = Conv2D(
198
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_0_Conv2d_1x1"
199
+ )(x)
200
+ branch_0 = BatchNormalization(
201
+ axis=3,
202
+ momentum=0.995,
203
+ epsilon=0.001,
204
+ scale=False,
205
+ name="Block35_2_Branch_0_Conv2d_1x1_BatchNorm",
206
+ )(branch_0)
207
+ branch_0 = Activation("relu", name="Block35_2_Branch_0_Conv2d_1x1_Activation")(branch_0)
208
+ branch_1 = Conv2D(
209
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_1_Conv2d_0a_1x1"
210
+ )(x)
211
+ branch_1 = BatchNormalization(
212
+ axis=3,
213
+ momentum=0.995,
214
+ epsilon=0.001,
215
+ scale=False,
216
+ name="Block35_2_Branch_1_Conv2d_0a_1x1_BatchNorm",
217
+ )(branch_1)
218
+ branch_1 = Activation("relu", name="Block35_2_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
219
+ branch_1 = Conv2D(
220
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_1_Conv2d_0b_3x3"
221
+ )(branch_1)
222
+ branch_1 = BatchNormalization(
223
+ axis=3,
224
+ momentum=0.995,
225
+ epsilon=0.001,
226
+ scale=False,
227
+ name="Block35_2_Branch_1_Conv2d_0b_3x3_BatchNorm",
228
+ )(branch_1)
229
+ branch_1 = Activation("relu", name="Block35_2_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
230
+ branch_2 = Conv2D(
231
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_2_Conv2d_0a_1x1"
232
+ )(x)
233
+ branch_2 = BatchNormalization(
234
+ axis=3,
235
+ momentum=0.995,
236
+ epsilon=0.001,
237
+ scale=False,
238
+ name="Block35_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
239
+ )(branch_2)
240
+ branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
241
+ branch_2 = Conv2D(
242
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_2_Conv2d_0b_3x3"
243
+ )(branch_2)
244
+ branch_2 = BatchNormalization(
245
+ axis=3,
246
+ momentum=0.995,
247
+ epsilon=0.001,
248
+ scale=False,
249
+ name="Block35_2_Branch_2_Conv2d_0b_3x3_BatchNorm",
250
+ )(branch_2)
251
+ branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
252
+ branch_2 = Conv2D(
253
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_2_Conv2d_0c_3x3"
254
+ )(branch_2)
255
+ branch_2 = BatchNormalization(
256
+ axis=3,
257
+ momentum=0.995,
258
+ epsilon=0.001,
259
+ scale=False,
260
+ name="Block35_2_Branch_2_Conv2d_0c_3x3_BatchNorm",
261
+ )(branch_2)
262
+ branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
263
+ branches = [branch_0, branch_1, branch_2]
264
+ mixed = Concatenate(axis=3, name="Block35_2_Concatenate")(branches)
265
+ up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_2_Conv2d_1x1")(
266
+ mixed
267
+ )
268
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
269
+ x = add([x, up])
270
+ x = Activation("relu", name="Block35_2_Activation")(x)
271
+
272
+ branch_0 = Conv2D(
273
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_0_Conv2d_1x1"
274
+ )(x)
275
+ branch_0 = BatchNormalization(
276
+ axis=3,
277
+ momentum=0.995,
278
+ epsilon=0.001,
279
+ scale=False,
280
+ name="Block35_3_Branch_0_Conv2d_1x1_BatchNorm",
281
+ )(branch_0)
282
+ branch_0 = Activation("relu", name="Block35_3_Branch_0_Conv2d_1x1_Activation")(branch_0)
283
+ branch_1 = Conv2D(
284
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_1_Conv2d_0a_1x1"
285
+ )(x)
286
+ branch_1 = BatchNormalization(
287
+ axis=3,
288
+ momentum=0.995,
289
+ epsilon=0.001,
290
+ scale=False,
291
+ name="Block35_3_Branch_1_Conv2d_0a_1x1_BatchNorm",
292
+ )(branch_1)
293
+ branch_1 = Activation("relu", name="Block35_3_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
294
+ branch_1 = Conv2D(
295
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_1_Conv2d_0b_3x3"
296
+ )(branch_1)
297
+ branch_1 = BatchNormalization(
298
+ axis=3,
299
+ momentum=0.995,
300
+ epsilon=0.001,
301
+ scale=False,
302
+ name="Block35_3_Branch_1_Conv2d_0b_3x3_BatchNorm",
303
+ )(branch_1)
304
+ branch_1 = Activation("relu", name="Block35_3_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
305
+ branch_2 = Conv2D(
306
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_2_Conv2d_0a_1x1"
307
+ )(x)
308
+ branch_2 = BatchNormalization(
309
+ axis=3,
310
+ momentum=0.995,
311
+ epsilon=0.001,
312
+ scale=False,
313
+ name="Block35_3_Branch_2_Conv2d_0a_1x1_BatchNorm",
314
+ )(branch_2)
315
+ branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
316
+ branch_2 = Conv2D(
317
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_2_Conv2d_0b_3x3"
318
+ )(branch_2)
319
+ branch_2 = BatchNormalization(
320
+ axis=3,
321
+ momentum=0.995,
322
+ epsilon=0.001,
323
+ scale=False,
324
+ name="Block35_3_Branch_2_Conv2d_0b_3x3_BatchNorm",
325
+ )(branch_2)
326
+ branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
327
+ branch_2 = Conv2D(
328
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_2_Conv2d_0c_3x3"
329
+ )(branch_2)
330
+ branch_2 = BatchNormalization(
331
+ axis=3,
332
+ momentum=0.995,
333
+ epsilon=0.001,
334
+ scale=False,
335
+ name="Block35_3_Branch_2_Conv2d_0c_3x3_BatchNorm",
336
+ )(branch_2)
337
+ branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
338
+ branches = [branch_0, branch_1, branch_2]
339
+ mixed = Concatenate(axis=3, name="Block35_3_Concatenate")(branches)
340
+ up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_3_Conv2d_1x1")(
341
+ mixed
342
+ )
343
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
344
+ x = add([x, up])
345
+ x = Activation("relu", name="Block35_3_Activation")(x)
346
+
347
+ branch_0 = Conv2D(
348
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_0_Conv2d_1x1"
349
+ )(x)
350
+ branch_0 = BatchNormalization(
351
+ axis=3,
352
+ momentum=0.995,
353
+ epsilon=0.001,
354
+ scale=False,
355
+ name="Block35_4_Branch_0_Conv2d_1x1_BatchNorm",
356
+ )(branch_0)
357
+ branch_0 = Activation("relu", name="Block35_4_Branch_0_Conv2d_1x1_Activation")(branch_0)
358
+ branch_1 = Conv2D(
359
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_1_Conv2d_0a_1x1"
360
+ )(x)
361
+ branch_1 = BatchNormalization(
362
+ axis=3,
363
+ momentum=0.995,
364
+ epsilon=0.001,
365
+ scale=False,
366
+ name="Block35_4_Branch_1_Conv2d_0a_1x1_BatchNorm",
367
+ )(branch_1)
368
+ branch_1 = Activation("relu", name="Block35_4_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
369
+ branch_1 = Conv2D(
370
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_1_Conv2d_0b_3x3"
371
+ )(branch_1)
372
+ branch_1 = BatchNormalization(
373
+ axis=3,
374
+ momentum=0.995,
375
+ epsilon=0.001,
376
+ scale=False,
377
+ name="Block35_4_Branch_1_Conv2d_0b_3x3_BatchNorm",
378
+ )(branch_1)
379
+ branch_1 = Activation("relu", name="Block35_4_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
380
+ branch_2 = Conv2D(
381
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_2_Conv2d_0a_1x1"
382
+ )(x)
383
+ branch_2 = BatchNormalization(
384
+ axis=3,
385
+ momentum=0.995,
386
+ epsilon=0.001,
387
+ scale=False,
388
+ name="Block35_4_Branch_2_Conv2d_0a_1x1_BatchNorm",
389
+ )(branch_2)
390
+ branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
391
+ branch_2 = Conv2D(
392
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_2_Conv2d_0b_3x3"
393
+ )(branch_2)
394
+ branch_2 = BatchNormalization(
395
+ axis=3,
396
+ momentum=0.995,
397
+ epsilon=0.001,
398
+ scale=False,
399
+ name="Block35_4_Branch_2_Conv2d_0b_3x3_BatchNorm",
400
+ )(branch_2)
401
+ branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
402
+ branch_2 = Conv2D(
403
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_2_Conv2d_0c_3x3"
404
+ )(branch_2)
405
+ branch_2 = BatchNormalization(
406
+ axis=3,
407
+ momentum=0.995,
408
+ epsilon=0.001,
409
+ scale=False,
410
+ name="Block35_4_Branch_2_Conv2d_0c_3x3_BatchNorm",
411
+ )(branch_2)
412
+ branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
413
+ branches = [branch_0, branch_1, branch_2]
414
+ mixed = Concatenate(axis=3, name="Block35_4_Concatenate")(branches)
415
+ up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_4_Conv2d_1x1")(
416
+ mixed
417
+ )
418
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
419
+ x = add([x, up])
420
+ x = Activation("relu", name="Block35_4_Activation")(x)
421
+
422
+ branch_0 = Conv2D(
423
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_0_Conv2d_1x1"
424
+ )(x)
425
+ branch_0 = BatchNormalization(
426
+ axis=3,
427
+ momentum=0.995,
428
+ epsilon=0.001,
429
+ scale=False,
430
+ name="Block35_5_Branch_0_Conv2d_1x1_BatchNorm",
431
+ )(branch_0)
432
+ branch_0 = Activation("relu", name="Block35_5_Branch_0_Conv2d_1x1_Activation")(branch_0)
433
+ branch_1 = Conv2D(
434
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_1_Conv2d_0a_1x1"
435
+ )(x)
436
+ branch_1 = BatchNormalization(
437
+ axis=3,
438
+ momentum=0.995,
439
+ epsilon=0.001,
440
+ scale=False,
441
+ name="Block35_5_Branch_1_Conv2d_0a_1x1_BatchNorm",
442
+ )(branch_1)
443
+ branch_1 = Activation("relu", name="Block35_5_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
444
+ branch_1 = Conv2D(
445
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_1_Conv2d_0b_3x3"
446
+ )(branch_1)
447
+ branch_1 = BatchNormalization(
448
+ axis=3,
449
+ momentum=0.995,
450
+ epsilon=0.001,
451
+ scale=False,
452
+ name="Block35_5_Branch_1_Conv2d_0b_3x3_BatchNorm",
453
+ )(branch_1)
454
+ branch_1 = Activation("relu", name="Block35_5_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
455
+ branch_2 = Conv2D(
456
+ 32, 1, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_2_Conv2d_0a_1x1"
457
+ )(x)
458
+ branch_2 = BatchNormalization(
459
+ axis=3,
460
+ momentum=0.995,
461
+ epsilon=0.001,
462
+ scale=False,
463
+ name="Block35_5_Branch_2_Conv2d_0a_1x1_BatchNorm",
464
+ )(branch_2)
465
+ branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
466
+ branch_2 = Conv2D(
467
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_2_Conv2d_0b_3x3"
468
+ )(branch_2)
469
+ branch_2 = BatchNormalization(
470
+ axis=3,
471
+ momentum=0.995,
472
+ epsilon=0.001,
473
+ scale=False,
474
+ name="Block35_5_Branch_2_Conv2d_0b_3x3_BatchNorm",
475
+ )(branch_2)
476
+ branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
477
+ branch_2 = Conv2D(
478
+ 32, 3, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_2_Conv2d_0c_3x3"
479
+ )(branch_2)
480
+ branch_2 = BatchNormalization(
481
+ axis=3,
482
+ momentum=0.995,
483
+ epsilon=0.001,
484
+ scale=False,
485
+ name="Block35_5_Branch_2_Conv2d_0c_3x3_BatchNorm",
486
+ )(branch_2)
487
+ branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
488
+ branches = [branch_0, branch_1, branch_2]
489
+ mixed = Concatenate(axis=3, name="Block35_5_Concatenate")(branches)
490
+ up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_5_Conv2d_1x1")(
491
+ mixed
492
+ )
493
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
494
+ x = add([x, up])
495
+ x = Activation("relu", name="Block35_5_Activation")(x)
496
+
497
+ # Mixed 6a (Reduction-A block):
498
+ branch_0 = Conv2D(
499
+ 384, 3, strides=2, padding="valid", use_bias=False, name="Mixed_6a_Branch_0_Conv2d_1a_3x3"
500
+ )(x)
501
+ branch_0 = BatchNormalization(
502
+ axis=3,
503
+ momentum=0.995,
504
+ epsilon=0.001,
505
+ scale=False,
506
+ name="Mixed_6a_Branch_0_Conv2d_1a_3x3_BatchNorm",
507
+ )(branch_0)
508
+ branch_0 = Activation("relu", name="Mixed_6a_Branch_0_Conv2d_1a_3x3_Activation")(branch_0)
509
+ branch_1 = Conv2D(
510
+ 192, 1, strides=1, padding="same", use_bias=False, name="Mixed_6a_Branch_1_Conv2d_0a_1x1"
511
+ )(x)
512
+ branch_1 = BatchNormalization(
513
+ axis=3,
514
+ momentum=0.995,
515
+ epsilon=0.001,
516
+ scale=False,
517
+ name="Mixed_6a_Branch_1_Conv2d_0a_1x1_BatchNorm",
518
+ )(branch_1)
519
+ branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
520
+ branch_1 = Conv2D(
521
+ 192, 3, strides=1, padding="same", use_bias=False, name="Mixed_6a_Branch_1_Conv2d_0b_3x3"
522
+ )(branch_1)
523
+ branch_1 = BatchNormalization(
524
+ axis=3,
525
+ momentum=0.995,
526
+ epsilon=0.001,
527
+ scale=False,
528
+ name="Mixed_6a_Branch_1_Conv2d_0b_3x3_BatchNorm",
529
+ )(branch_1)
530
+ branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
531
+ branch_1 = Conv2D(
532
+ 256, 3, strides=2, padding="valid", use_bias=False, name="Mixed_6a_Branch_1_Conv2d_1a_3x3"
533
+ )(branch_1)
534
+ branch_1 = BatchNormalization(
535
+ axis=3,
536
+ momentum=0.995,
537
+ epsilon=0.001,
538
+ scale=False,
539
+ name="Mixed_6a_Branch_1_Conv2d_1a_3x3_BatchNorm",
540
+ )(branch_1)
541
+ branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_1a_3x3_Activation")(branch_1)
542
+ branch_pool = MaxPooling2D(
543
+ 3, strides=2, padding="valid", name="Mixed_6a_Branch_2_MaxPool_1a_3x3"
544
+ )(x)
545
+ branches = [branch_0, branch_1, branch_pool]
546
+ x = Concatenate(axis=3, name="Mixed_6a")(branches)
547
+
548
+ # 10x Block17 (Inception-ResNet-B block):
549
+ branch_0 = Conv2D(
550
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_1_Branch_0_Conv2d_1x1"
551
+ )(x)
552
+ branch_0 = BatchNormalization(
553
+ axis=3,
554
+ momentum=0.995,
555
+ epsilon=0.001,
556
+ scale=False,
557
+ name="Block17_1_Branch_0_Conv2d_1x1_BatchNorm",
558
+ )(branch_0)
559
+ branch_0 = Activation("relu", name="Block17_1_Branch_0_Conv2d_1x1_Activation")(branch_0)
560
+ branch_1 = Conv2D(
561
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_1_Branch_1_Conv2d_0a_1x1"
562
+ )(x)
563
+ branch_1 = BatchNormalization(
564
+ axis=3,
565
+ momentum=0.995,
566
+ epsilon=0.001,
567
+ scale=False,
568
+ name="Block17_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
569
+ )(branch_1)
570
+ branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
571
+ branch_1 = Conv2D(
572
+ 128,
573
+ [1, 7],
574
+ strides=1,
575
+ padding="same",
576
+ use_bias=False,
577
+ name="Block17_1_Branch_1_Conv2d_0b_1x7",
578
+ )(branch_1)
579
+ branch_1 = BatchNormalization(
580
+ axis=3,
581
+ momentum=0.995,
582
+ epsilon=0.001,
583
+ scale=False,
584
+ name="Block17_1_Branch_1_Conv2d_0b_1x7_BatchNorm",
585
+ )(branch_1)
586
+ branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0b_1x7_Activation")(branch_1)
587
+ branch_1 = Conv2D(
588
+ 128,
589
+ [7, 1],
590
+ strides=1,
591
+ padding="same",
592
+ use_bias=False,
593
+ name="Block17_1_Branch_1_Conv2d_0c_7x1",
594
+ )(branch_1)
595
+ branch_1 = BatchNormalization(
596
+ axis=3,
597
+ momentum=0.995,
598
+ epsilon=0.001,
599
+ scale=False,
600
+ name="Block17_1_Branch_1_Conv2d_0c_7x1_BatchNorm",
601
+ )(branch_1)
602
+ branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0c_7x1_Activation")(branch_1)
603
+ branches = [branch_0, branch_1]
604
+ mixed = Concatenate(axis=3, name="Block17_1_Concatenate")(branches)
605
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_1_Conv2d_1x1")(
606
+ mixed
607
+ )
608
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
609
+ x = add([x, up])
610
+ x = Activation("relu", name="Block17_1_Activation")(x)
611
+
612
+ branch_0 = Conv2D(
613
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_2_Branch_0_Conv2d_1x1"
614
+ )(x)
615
+ branch_0 = BatchNormalization(
616
+ axis=3,
617
+ momentum=0.995,
618
+ epsilon=0.001,
619
+ scale=False,
620
+ name="Block17_2_Branch_0_Conv2d_1x1_BatchNorm",
621
+ )(branch_0)
622
+ branch_0 = Activation("relu", name="Block17_2_Branch_0_Conv2d_1x1_Activation")(branch_0)
623
+ branch_1 = Conv2D(
624
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_2_Branch_2_Conv2d_0a_1x1"
625
+ )(x)
626
+ branch_1 = BatchNormalization(
627
+ axis=3,
628
+ momentum=0.995,
629
+ epsilon=0.001,
630
+ scale=False,
631
+ name="Block17_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
632
+ )(branch_1)
633
+ branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0a_1x1_Activation")(branch_1)
634
+ branch_1 = Conv2D(
635
+ 128,
636
+ [1, 7],
637
+ strides=1,
638
+ padding="same",
639
+ use_bias=False,
640
+ name="Block17_2_Branch_2_Conv2d_0b_1x7",
641
+ )(branch_1)
642
+ branch_1 = BatchNormalization(
643
+ axis=3,
644
+ momentum=0.995,
645
+ epsilon=0.001,
646
+ scale=False,
647
+ name="Block17_2_Branch_2_Conv2d_0b_1x7_BatchNorm",
648
+ )(branch_1)
649
+ branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0b_1x7_Activation")(branch_1)
650
+ branch_1 = Conv2D(
651
+ 128,
652
+ [7, 1],
653
+ strides=1,
654
+ padding="same",
655
+ use_bias=False,
656
+ name="Block17_2_Branch_2_Conv2d_0c_7x1",
657
+ )(branch_1)
658
+ branch_1 = BatchNormalization(
659
+ axis=3,
660
+ momentum=0.995,
661
+ epsilon=0.001,
662
+ scale=False,
663
+ name="Block17_2_Branch_2_Conv2d_0c_7x1_BatchNorm",
664
+ )(branch_1)
665
+ branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0c_7x1_Activation")(branch_1)
666
+ branches = [branch_0, branch_1]
667
+ mixed = Concatenate(axis=3, name="Block17_2_Concatenate")(branches)
668
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_2_Conv2d_1x1")(
669
+ mixed
670
+ )
671
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
672
+ x = add([x, up])
673
+ x = Activation("relu", name="Block17_2_Activation")(x)
674
+
675
+ branch_0 = Conv2D(
676
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_3_Branch_0_Conv2d_1x1"
677
+ )(x)
678
+ branch_0 = BatchNormalization(
679
+ axis=3,
680
+ momentum=0.995,
681
+ epsilon=0.001,
682
+ scale=False,
683
+ name="Block17_3_Branch_0_Conv2d_1x1_BatchNorm",
684
+ )(branch_0)
685
+ branch_0 = Activation("relu", name="Block17_3_Branch_0_Conv2d_1x1_Activation")(branch_0)
686
+ branch_1 = Conv2D(
687
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_3_Branch_3_Conv2d_0a_1x1"
688
+ )(x)
689
+ branch_1 = BatchNormalization(
690
+ axis=3,
691
+ momentum=0.995,
692
+ epsilon=0.001,
693
+ scale=False,
694
+ name="Block17_3_Branch_3_Conv2d_0a_1x1_BatchNorm",
695
+ )(branch_1)
696
+ branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0a_1x1_Activation")(branch_1)
697
+ branch_1 = Conv2D(
698
+ 128,
699
+ [1, 7],
700
+ strides=1,
701
+ padding="same",
702
+ use_bias=False,
703
+ name="Block17_3_Branch_3_Conv2d_0b_1x7",
704
+ )(branch_1)
705
+ branch_1 = BatchNormalization(
706
+ axis=3,
707
+ momentum=0.995,
708
+ epsilon=0.001,
709
+ scale=False,
710
+ name="Block17_3_Branch_3_Conv2d_0b_1x7_BatchNorm",
711
+ )(branch_1)
712
+ branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0b_1x7_Activation")(branch_1)
713
+ branch_1 = Conv2D(
714
+ 128,
715
+ [7, 1],
716
+ strides=1,
717
+ padding="same",
718
+ use_bias=False,
719
+ name="Block17_3_Branch_3_Conv2d_0c_7x1",
720
+ )(branch_1)
721
+ branch_1 = BatchNormalization(
722
+ axis=3,
723
+ momentum=0.995,
724
+ epsilon=0.001,
725
+ scale=False,
726
+ name="Block17_3_Branch_3_Conv2d_0c_7x1_BatchNorm",
727
+ )(branch_1)
728
+ branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0c_7x1_Activation")(branch_1)
729
+ branches = [branch_0, branch_1]
730
+ mixed = Concatenate(axis=3, name="Block17_3_Concatenate")(branches)
731
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_3_Conv2d_1x1")(
732
+ mixed
733
+ )
734
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
735
+ x = add([x, up])
736
+ x = Activation("relu", name="Block17_3_Activation")(x)
737
+
738
+ branch_0 = Conv2D(
739
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_4_Branch_0_Conv2d_1x1"
740
+ )(x)
741
+ branch_0 = BatchNormalization(
742
+ axis=3,
743
+ momentum=0.995,
744
+ epsilon=0.001,
745
+ scale=False,
746
+ name="Block17_4_Branch_0_Conv2d_1x1_BatchNorm",
747
+ )(branch_0)
748
+ branch_0 = Activation("relu", name="Block17_4_Branch_0_Conv2d_1x1_Activation")(branch_0)
749
+ branch_1 = Conv2D(
750
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_4_Branch_4_Conv2d_0a_1x1"
751
+ )(x)
752
+ branch_1 = BatchNormalization(
753
+ axis=3,
754
+ momentum=0.995,
755
+ epsilon=0.001,
756
+ scale=False,
757
+ name="Block17_4_Branch_4_Conv2d_0a_1x1_BatchNorm",
758
+ )(branch_1)
759
+ branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0a_1x1_Activation")(branch_1)
760
+ branch_1 = Conv2D(
761
+ 128,
762
+ [1, 7],
763
+ strides=1,
764
+ padding="same",
765
+ use_bias=False,
766
+ name="Block17_4_Branch_4_Conv2d_0b_1x7",
767
+ )(branch_1)
768
+ branch_1 = BatchNormalization(
769
+ axis=3,
770
+ momentum=0.995,
771
+ epsilon=0.001,
772
+ scale=False,
773
+ name="Block17_4_Branch_4_Conv2d_0b_1x7_BatchNorm",
774
+ )(branch_1)
775
+ branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0b_1x7_Activation")(branch_1)
776
+ branch_1 = Conv2D(
777
+ 128,
778
+ [7, 1],
779
+ strides=1,
780
+ padding="same",
781
+ use_bias=False,
782
+ name="Block17_4_Branch_4_Conv2d_0c_7x1",
783
+ )(branch_1)
784
+ branch_1 = BatchNormalization(
785
+ axis=3,
786
+ momentum=0.995,
787
+ epsilon=0.001,
788
+ scale=False,
789
+ name="Block17_4_Branch_4_Conv2d_0c_7x1_BatchNorm",
790
+ )(branch_1)
791
+ branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0c_7x1_Activation")(branch_1)
792
+ branches = [branch_0, branch_1]
793
+ mixed = Concatenate(axis=3, name="Block17_4_Concatenate")(branches)
794
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_4_Conv2d_1x1")(
795
+ mixed
796
+ )
797
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
798
+ x = add([x, up])
799
+ x = Activation("relu", name="Block17_4_Activation")(x)
800
+
801
+ branch_0 = Conv2D(
802
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_5_Branch_0_Conv2d_1x1"
803
+ )(x)
804
+ branch_0 = BatchNormalization(
805
+ axis=3,
806
+ momentum=0.995,
807
+ epsilon=0.001,
808
+ scale=False,
809
+ name="Block17_5_Branch_0_Conv2d_1x1_BatchNorm",
810
+ )(branch_0)
811
+ branch_0 = Activation("relu", name="Block17_5_Branch_0_Conv2d_1x1_Activation")(branch_0)
812
+ branch_1 = Conv2D(
813
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_5_Branch_5_Conv2d_0a_1x1"
814
+ )(x)
815
+ branch_1 = BatchNormalization(
816
+ axis=3,
817
+ momentum=0.995,
818
+ epsilon=0.001,
819
+ scale=False,
820
+ name="Block17_5_Branch_5_Conv2d_0a_1x1_BatchNorm",
821
+ )(branch_1)
822
+ branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0a_1x1_Activation")(branch_1)
823
+ branch_1 = Conv2D(
824
+ 128,
825
+ [1, 7],
826
+ strides=1,
827
+ padding="same",
828
+ use_bias=False,
829
+ name="Block17_5_Branch_5_Conv2d_0b_1x7",
830
+ )(branch_1)
831
+ branch_1 = BatchNormalization(
832
+ axis=3,
833
+ momentum=0.995,
834
+ epsilon=0.001,
835
+ scale=False,
836
+ name="Block17_5_Branch_5_Conv2d_0b_1x7_BatchNorm",
837
+ )(branch_1)
838
+ branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0b_1x7_Activation")(branch_1)
839
+ branch_1 = Conv2D(
840
+ 128,
841
+ [7, 1],
842
+ strides=1,
843
+ padding="same",
844
+ use_bias=False,
845
+ name="Block17_5_Branch_5_Conv2d_0c_7x1",
846
+ )(branch_1)
847
+ branch_1 = BatchNormalization(
848
+ axis=3,
849
+ momentum=0.995,
850
+ epsilon=0.001,
851
+ scale=False,
852
+ name="Block17_5_Branch_5_Conv2d_0c_7x1_BatchNorm",
853
+ )(branch_1)
854
+ branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0c_7x1_Activation")(branch_1)
855
+ branches = [branch_0, branch_1]
856
+ mixed = Concatenate(axis=3, name="Block17_5_Concatenate")(branches)
857
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_5_Conv2d_1x1")(
858
+ mixed
859
+ )
860
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
861
+ x = add([x, up])
862
+ x = Activation("relu", name="Block17_5_Activation")(x)
863
+
864
+ branch_0 = Conv2D(
865
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_6_Branch_0_Conv2d_1x1"
866
+ )(x)
867
+ branch_0 = BatchNormalization(
868
+ axis=3,
869
+ momentum=0.995,
870
+ epsilon=0.001,
871
+ scale=False,
872
+ name="Block17_6_Branch_0_Conv2d_1x1_BatchNorm",
873
+ )(branch_0)
874
+ branch_0 = Activation("relu", name="Block17_6_Branch_0_Conv2d_1x1_Activation")(branch_0)
875
+ branch_1 = Conv2D(
876
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_6_Branch_6_Conv2d_0a_1x1"
877
+ )(x)
878
+ branch_1 = BatchNormalization(
879
+ axis=3,
880
+ momentum=0.995,
881
+ epsilon=0.001,
882
+ scale=False,
883
+ name="Block17_6_Branch_6_Conv2d_0a_1x1_BatchNorm",
884
+ )(branch_1)
885
+ branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0a_1x1_Activation")(branch_1)
886
+ branch_1 = Conv2D(
887
+ 128,
888
+ [1, 7],
889
+ strides=1,
890
+ padding="same",
891
+ use_bias=False,
892
+ name="Block17_6_Branch_6_Conv2d_0b_1x7",
893
+ )(branch_1)
894
+ branch_1 = BatchNormalization(
895
+ axis=3,
896
+ momentum=0.995,
897
+ epsilon=0.001,
898
+ scale=False,
899
+ name="Block17_6_Branch_6_Conv2d_0b_1x7_BatchNorm",
900
+ )(branch_1)
901
+ branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0b_1x7_Activation")(branch_1)
902
+ branch_1 = Conv2D(
903
+ 128,
904
+ [7, 1],
905
+ strides=1,
906
+ padding="same",
907
+ use_bias=False,
908
+ name="Block17_6_Branch_6_Conv2d_0c_7x1",
909
+ )(branch_1)
910
+ branch_1 = BatchNormalization(
911
+ axis=3,
912
+ momentum=0.995,
913
+ epsilon=0.001,
914
+ scale=False,
915
+ name="Block17_6_Branch_6_Conv2d_0c_7x1_BatchNorm",
916
+ )(branch_1)
917
+ branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0c_7x1_Activation")(branch_1)
918
+ branches = [branch_0, branch_1]
919
+ mixed = Concatenate(axis=3, name="Block17_6_Concatenate")(branches)
920
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_6_Conv2d_1x1")(
921
+ mixed
922
+ )
923
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
924
+ x = add([x, up])
925
+ x = Activation("relu", name="Block17_6_Activation")(x)
926
+
927
+ branch_0 = Conv2D(
928
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_7_Branch_0_Conv2d_1x1"
929
+ )(x)
930
+ branch_0 = BatchNormalization(
931
+ axis=3,
932
+ momentum=0.995,
933
+ epsilon=0.001,
934
+ scale=False,
935
+ name="Block17_7_Branch_0_Conv2d_1x1_BatchNorm",
936
+ )(branch_0)
937
+ branch_0 = Activation("relu", name="Block17_7_Branch_0_Conv2d_1x1_Activation")(branch_0)
938
+ branch_1 = Conv2D(
939
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_7_Branch_7_Conv2d_0a_1x1"
940
+ )(x)
941
+ branch_1 = BatchNormalization(
942
+ axis=3,
943
+ momentum=0.995,
944
+ epsilon=0.001,
945
+ scale=False,
946
+ name="Block17_7_Branch_7_Conv2d_0a_1x1_BatchNorm",
947
+ )(branch_1)
948
+ branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0a_1x1_Activation")(branch_1)
949
+ branch_1 = Conv2D(
950
+ 128,
951
+ [1, 7],
952
+ strides=1,
953
+ padding="same",
954
+ use_bias=False,
955
+ name="Block17_7_Branch_7_Conv2d_0b_1x7",
956
+ )(branch_1)
957
+ branch_1 = BatchNormalization(
958
+ axis=3,
959
+ momentum=0.995,
960
+ epsilon=0.001,
961
+ scale=False,
962
+ name="Block17_7_Branch_7_Conv2d_0b_1x7_BatchNorm",
963
+ )(branch_1)
964
+ branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0b_1x7_Activation")(branch_1)
965
+ branch_1 = Conv2D(
966
+ 128,
967
+ [7, 1],
968
+ strides=1,
969
+ padding="same",
970
+ use_bias=False,
971
+ name="Block17_7_Branch_7_Conv2d_0c_7x1",
972
+ )(branch_1)
973
+ branch_1 = BatchNormalization(
974
+ axis=3,
975
+ momentum=0.995,
976
+ epsilon=0.001,
977
+ scale=False,
978
+ name="Block17_7_Branch_7_Conv2d_0c_7x1_BatchNorm",
979
+ )(branch_1)
980
+ branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0c_7x1_Activation")(branch_1)
981
+ branches = [branch_0, branch_1]
982
+ mixed = Concatenate(axis=3, name="Block17_7_Concatenate")(branches)
983
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_7_Conv2d_1x1")(
984
+ mixed
985
+ )
986
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
987
+ x = add([x, up])
988
+ x = Activation("relu", name="Block17_7_Activation")(x)
989
+
990
+ branch_0 = Conv2D(
991
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_8_Branch_0_Conv2d_1x1"
992
+ )(x)
993
+ branch_0 = BatchNormalization(
994
+ axis=3,
995
+ momentum=0.995,
996
+ epsilon=0.001,
997
+ scale=False,
998
+ name="Block17_8_Branch_0_Conv2d_1x1_BatchNorm",
999
+ )(branch_0)
1000
+ branch_0 = Activation("relu", name="Block17_8_Branch_0_Conv2d_1x1_Activation")(branch_0)
1001
+ branch_1 = Conv2D(
1002
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_8_Branch_8_Conv2d_0a_1x1"
1003
+ )(x)
1004
+ branch_1 = BatchNormalization(
1005
+ axis=3,
1006
+ momentum=0.995,
1007
+ epsilon=0.001,
1008
+ scale=False,
1009
+ name="Block17_8_Branch_8_Conv2d_0a_1x1_BatchNorm",
1010
+ )(branch_1)
1011
+ branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0a_1x1_Activation")(branch_1)
1012
+ branch_1 = Conv2D(
1013
+ 128,
1014
+ [1, 7],
1015
+ strides=1,
1016
+ padding="same",
1017
+ use_bias=False,
1018
+ name="Block17_8_Branch_8_Conv2d_0b_1x7",
1019
+ )(branch_1)
1020
+ branch_1 = BatchNormalization(
1021
+ axis=3,
1022
+ momentum=0.995,
1023
+ epsilon=0.001,
1024
+ scale=False,
1025
+ name="Block17_8_Branch_8_Conv2d_0b_1x7_BatchNorm",
1026
+ )(branch_1)
1027
+ branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0b_1x7_Activation")(branch_1)
1028
+ branch_1 = Conv2D(
1029
+ 128,
1030
+ [7, 1],
1031
+ strides=1,
1032
+ padding="same",
1033
+ use_bias=False,
1034
+ name="Block17_8_Branch_8_Conv2d_0c_7x1",
1035
+ )(branch_1)
1036
+ branch_1 = BatchNormalization(
1037
+ axis=3,
1038
+ momentum=0.995,
1039
+ epsilon=0.001,
1040
+ scale=False,
1041
+ name="Block17_8_Branch_8_Conv2d_0c_7x1_BatchNorm",
1042
+ )(branch_1)
1043
+ branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0c_7x1_Activation")(branch_1)
1044
+ branches = [branch_0, branch_1]
1045
+ mixed = Concatenate(axis=3, name="Block17_8_Concatenate")(branches)
1046
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_8_Conv2d_1x1")(
1047
+ mixed
1048
+ )
1049
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
1050
+ x = add([x, up])
1051
+ x = Activation("relu", name="Block17_8_Activation")(x)
1052
+
1053
+ branch_0 = Conv2D(
1054
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_9_Branch_0_Conv2d_1x1"
1055
+ )(x)
1056
+ branch_0 = BatchNormalization(
1057
+ axis=3,
1058
+ momentum=0.995,
1059
+ epsilon=0.001,
1060
+ scale=False,
1061
+ name="Block17_9_Branch_0_Conv2d_1x1_BatchNorm",
1062
+ )(branch_0)
1063
+ branch_0 = Activation("relu", name="Block17_9_Branch_0_Conv2d_1x1_Activation")(branch_0)
1064
+ branch_1 = Conv2D(
1065
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_9_Branch_9_Conv2d_0a_1x1"
1066
+ )(x)
1067
+ branch_1 = BatchNormalization(
1068
+ axis=3,
1069
+ momentum=0.995,
1070
+ epsilon=0.001,
1071
+ scale=False,
1072
+ name="Block17_9_Branch_9_Conv2d_0a_1x1_BatchNorm",
1073
+ )(branch_1)
1074
+ branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0a_1x1_Activation")(branch_1)
1075
+ branch_1 = Conv2D(
1076
+ 128,
1077
+ [1, 7],
1078
+ strides=1,
1079
+ padding="same",
1080
+ use_bias=False,
1081
+ name="Block17_9_Branch_9_Conv2d_0b_1x7",
1082
+ )(branch_1)
1083
+ branch_1 = BatchNormalization(
1084
+ axis=3,
1085
+ momentum=0.995,
1086
+ epsilon=0.001,
1087
+ scale=False,
1088
+ name="Block17_9_Branch_9_Conv2d_0b_1x7_BatchNorm",
1089
+ )(branch_1)
1090
+ branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0b_1x7_Activation")(branch_1)
1091
+ branch_1 = Conv2D(
1092
+ 128,
1093
+ [7, 1],
1094
+ strides=1,
1095
+ padding="same",
1096
+ use_bias=False,
1097
+ name="Block17_9_Branch_9_Conv2d_0c_7x1",
1098
+ )(branch_1)
1099
+ branch_1 = BatchNormalization(
1100
+ axis=3,
1101
+ momentum=0.995,
1102
+ epsilon=0.001,
1103
+ scale=False,
1104
+ name="Block17_9_Branch_9_Conv2d_0c_7x1_BatchNorm",
1105
+ )(branch_1)
1106
+ branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0c_7x1_Activation")(branch_1)
1107
+ branches = [branch_0, branch_1]
1108
+ mixed = Concatenate(axis=3, name="Block17_9_Concatenate")(branches)
1109
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_9_Conv2d_1x1")(
1110
+ mixed
1111
+ )
1112
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
1113
+ x = add([x, up])
1114
+ x = Activation("relu", name="Block17_9_Activation")(x)
1115
+
1116
+ branch_0 = Conv2D(
1117
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_10_Branch_0_Conv2d_1x1"
1118
+ )(x)
1119
+ branch_0 = BatchNormalization(
1120
+ axis=3,
1121
+ momentum=0.995,
1122
+ epsilon=0.001,
1123
+ scale=False,
1124
+ name="Block17_10_Branch_0_Conv2d_1x1_BatchNorm",
1125
+ )(branch_0)
1126
+ branch_0 = Activation("relu", name="Block17_10_Branch_0_Conv2d_1x1_Activation")(branch_0)
1127
+ branch_1 = Conv2D(
1128
+ 128, 1, strides=1, padding="same", use_bias=False, name="Block17_10_Branch_10_Conv2d_0a_1x1"
1129
+ )(x)
1130
+ branch_1 = BatchNormalization(
1131
+ axis=3,
1132
+ momentum=0.995,
1133
+ epsilon=0.001,
1134
+ scale=False,
1135
+ name="Block17_10_Branch_10_Conv2d_0a_1x1_BatchNorm",
1136
+ )(branch_1)
1137
+ branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0a_1x1_Activation")(branch_1)
1138
+ branch_1 = Conv2D(
1139
+ 128,
1140
+ [1, 7],
1141
+ strides=1,
1142
+ padding="same",
1143
+ use_bias=False,
1144
+ name="Block17_10_Branch_10_Conv2d_0b_1x7",
1145
+ )(branch_1)
1146
+ branch_1 = BatchNormalization(
1147
+ axis=3,
1148
+ momentum=0.995,
1149
+ epsilon=0.001,
1150
+ scale=False,
1151
+ name="Block17_10_Branch_10_Conv2d_0b_1x7_BatchNorm",
1152
+ )(branch_1)
1153
+ branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0b_1x7_Activation")(branch_1)
1154
+ branch_1 = Conv2D(
1155
+ 128,
1156
+ [7, 1],
1157
+ strides=1,
1158
+ padding="same",
1159
+ use_bias=False,
1160
+ name="Block17_10_Branch_10_Conv2d_0c_7x1",
1161
+ )(branch_1)
1162
+ branch_1 = BatchNormalization(
1163
+ axis=3,
1164
+ momentum=0.995,
1165
+ epsilon=0.001,
1166
+ scale=False,
1167
+ name="Block17_10_Branch_10_Conv2d_0c_7x1_BatchNorm",
1168
+ )(branch_1)
1169
+ branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0c_7x1_Activation")(branch_1)
1170
+ branches = [branch_0, branch_1]
1171
+ mixed = Concatenate(axis=3, name="Block17_10_Concatenate")(branches)
1172
+ up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_10_Conv2d_1x1")(
1173
+ mixed
1174
+ )
1175
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
1176
+ x = add([x, up])
1177
+ x = Activation("relu", name="Block17_10_Activation")(x)
1178
+
1179
+ # Mixed 7a (Reduction-B block): 8 x 8 x 2080
1180
+ branch_0 = Conv2D(
1181
+ 256, 1, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_0_Conv2d_0a_1x1"
1182
+ )(x)
1183
+ branch_0 = BatchNormalization(
1184
+ axis=3,
1185
+ momentum=0.995,
1186
+ epsilon=0.001,
1187
+ scale=False,
1188
+ name="Mixed_7a_Branch_0_Conv2d_0a_1x1_BatchNorm",
1189
+ )(branch_0)
1190
+ branch_0 = Activation("relu", name="Mixed_7a_Branch_0_Conv2d_0a_1x1_Activation")(branch_0)
1191
+ branch_0 = Conv2D(
1192
+ 384, 3, strides=2, padding="valid", use_bias=False, name="Mixed_7a_Branch_0_Conv2d_1a_3x3"
1193
+ )(branch_0)
1194
+ branch_0 = BatchNormalization(
1195
+ axis=3,
1196
+ momentum=0.995,
1197
+ epsilon=0.001,
1198
+ scale=False,
1199
+ name="Mixed_7a_Branch_0_Conv2d_1a_3x3_BatchNorm",
1200
+ )(branch_0)
1201
+ branch_0 = Activation("relu", name="Mixed_7a_Branch_0_Conv2d_1a_3x3_Activation")(branch_0)
1202
+ branch_1 = Conv2D(
1203
+ 256, 1, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_1_Conv2d_0a_1x1"
1204
+ )(x)
1205
+ branch_1 = BatchNormalization(
1206
+ axis=3,
1207
+ momentum=0.995,
1208
+ epsilon=0.001,
1209
+ scale=False,
1210
+ name="Mixed_7a_Branch_1_Conv2d_0a_1x1_BatchNorm",
1211
+ )(branch_1)
1212
+ branch_1 = Activation("relu", name="Mixed_7a_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
1213
+ branch_1 = Conv2D(
1214
+ 256, 3, strides=2, padding="valid", use_bias=False, name="Mixed_7a_Branch_1_Conv2d_1a_3x3"
1215
+ )(branch_1)
1216
+ branch_1 = BatchNormalization(
1217
+ axis=3,
1218
+ momentum=0.995,
1219
+ epsilon=0.001,
1220
+ scale=False,
1221
+ name="Mixed_7a_Branch_1_Conv2d_1a_3x3_BatchNorm",
1222
+ )(branch_1)
1223
+ branch_1 = Activation("relu", name="Mixed_7a_Branch_1_Conv2d_1a_3x3_Activation")(branch_1)
1224
+ branch_2 = Conv2D(
1225
+ 256, 1, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_2_Conv2d_0a_1x1"
1226
+ )(x)
1227
+ branch_2 = BatchNormalization(
1228
+ axis=3,
1229
+ momentum=0.995,
1230
+ epsilon=0.001,
1231
+ scale=False,
1232
+ name="Mixed_7a_Branch_2_Conv2d_0a_1x1_BatchNorm",
1233
+ )(branch_2)
1234
+ branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
1235
+ branch_2 = Conv2D(
1236
+ 256, 3, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_2_Conv2d_0b_3x3"
1237
+ )(branch_2)
1238
+ branch_2 = BatchNormalization(
1239
+ axis=3,
1240
+ momentum=0.995,
1241
+ epsilon=0.001,
1242
+ scale=False,
1243
+ name="Mixed_7a_Branch_2_Conv2d_0b_3x3_BatchNorm",
1244
+ )(branch_2)
1245
+ branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
1246
+ branch_2 = Conv2D(
1247
+ 256, 3, strides=2, padding="valid", use_bias=False, name="Mixed_7a_Branch_2_Conv2d_1a_3x3"
1248
+ )(branch_2)
1249
+ branch_2 = BatchNormalization(
1250
+ axis=3,
1251
+ momentum=0.995,
1252
+ epsilon=0.001,
1253
+ scale=False,
1254
+ name="Mixed_7a_Branch_2_Conv2d_1a_3x3_BatchNorm",
1255
+ )(branch_2)
1256
+ branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_1a_3x3_Activation")(branch_2)
1257
+ branch_pool = MaxPooling2D(
1258
+ 3, strides=2, padding="valid", name="Mixed_7a_Branch_3_MaxPool_1a_3x3"
1259
+ )(x)
1260
+ branches = [branch_0, branch_1, branch_2, branch_pool]
1261
+ x = Concatenate(axis=3, name="Mixed_7a")(branches)
1262
+
1263
+ # 5x Block8 (Inception-ResNet-C block):
1264
+
1265
+ branch_0 = Conv2D(
1266
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_1_Branch_0_Conv2d_1x1"
1267
+ )(x)
1268
+ branch_0 = BatchNormalization(
1269
+ axis=3,
1270
+ momentum=0.995,
1271
+ epsilon=0.001,
1272
+ scale=False,
1273
+ name="Block8_1_Branch_0_Conv2d_1x1_BatchNorm",
1274
+ )(branch_0)
1275
+ branch_0 = Activation("relu", name="Block8_1_Branch_0_Conv2d_1x1_Activation")(branch_0)
1276
+ branch_1 = Conv2D(
1277
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_1_Branch_1_Conv2d_0a_1x1"
1278
+ )(x)
1279
+ branch_1 = BatchNormalization(
1280
+ axis=3,
1281
+ momentum=0.995,
1282
+ epsilon=0.001,
1283
+ scale=False,
1284
+ name="Block8_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
1285
+ )(branch_1)
1286
+ branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
1287
+ branch_1 = Conv2D(
1288
+ 192,
1289
+ [1, 3],
1290
+ strides=1,
1291
+ padding="same",
1292
+ use_bias=False,
1293
+ name="Block8_1_Branch_1_Conv2d_0b_1x3",
1294
+ )(branch_1)
1295
+ branch_1 = BatchNormalization(
1296
+ axis=3,
1297
+ momentum=0.995,
1298
+ epsilon=0.001,
1299
+ scale=False,
1300
+ name="Block8_1_Branch_1_Conv2d_0b_1x3_BatchNorm",
1301
+ )(branch_1)
1302
+ branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0b_1x3_Activation")(branch_1)
1303
+ branch_1 = Conv2D(
1304
+ 192,
1305
+ [3, 1],
1306
+ strides=1,
1307
+ padding="same",
1308
+ use_bias=False,
1309
+ name="Block8_1_Branch_1_Conv2d_0c_3x1",
1310
+ )(branch_1)
1311
+ branch_1 = BatchNormalization(
1312
+ axis=3,
1313
+ momentum=0.995,
1314
+ epsilon=0.001,
1315
+ scale=False,
1316
+ name="Block8_1_Branch_1_Conv2d_0c_3x1_BatchNorm",
1317
+ )(branch_1)
1318
+ branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0c_3x1_Activation")(branch_1)
1319
+ branches = [branch_0, branch_1]
1320
+ mixed = Concatenate(axis=3, name="Block8_1_Concatenate")(branches)
1321
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_1_Conv2d_1x1")(
1322
+ mixed
1323
+ )
1324
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
1325
+ x = add([x, up])
1326
+ x = Activation("relu", name="Block8_1_Activation")(x)
1327
+
1328
+ branch_0 = Conv2D(
1329
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_2_Branch_0_Conv2d_1x1"
1330
+ )(x)
1331
+ branch_0 = BatchNormalization(
1332
+ axis=3,
1333
+ momentum=0.995,
1334
+ epsilon=0.001,
1335
+ scale=False,
1336
+ name="Block8_2_Branch_0_Conv2d_1x1_BatchNorm",
1337
+ )(branch_0)
1338
+ branch_0 = Activation("relu", name="Block8_2_Branch_0_Conv2d_1x1_Activation")(branch_0)
1339
+ branch_1 = Conv2D(
1340
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_2_Branch_2_Conv2d_0a_1x1"
1341
+ )(x)
1342
+ branch_1 = BatchNormalization(
1343
+ axis=3,
1344
+ momentum=0.995,
1345
+ epsilon=0.001,
1346
+ scale=False,
1347
+ name="Block8_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
1348
+ )(branch_1)
1349
+ branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0a_1x1_Activation")(branch_1)
1350
+ branch_1 = Conv2D(
1351
+ 192,
1352
+ [1, 3],
1353
+ strides=1,
1354
+ padding="same",
1355
+ use_bias=False,
1356
+ name="Block8_2_Branch_2_Conv2d_0b_1x3",
1357
+ )(branch_1)
1358
+ branch_1 = BatchNormalization(
1359
+ axis=3,
1360
+ momentum=0.995,
1361
+ epsilon=0.001,
1362
+ scale=False,
1363
+ name="Block8_2_Branch_2_Conv2d_0b_1x3_BatchNorm",
1364
+ )(branch_1)
1365
+ branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0b_1x3_Activation")(branch_1)
1366
+ branch_1 = Conv2D(
1367
+ 192,
1368
+ [3, 1],
1369
+ strides=1,
1370
+ padding="same",
1371
+ use_bias=False,
1372
+ name="Block8_2_Branch_2_Conv2d_0c_3x1",
1373
+ )(branch_1)
1374
+ branch_1 = BatchNormalization(
1375
+ axis=3,
1376
+ momentum=0.995,
1377
+ epsilon=0.001,
1378
+ scale=False,
1379
+ name="Block8_2_Branch_2_Conv2d_0c_3x1_BatchNorm",
1380
+ )(branch_1)
1381
+ branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0c_3x1_Activation")(branch_1)
1382
+ branches = [branch_0, branch_1]
1383
+ mixed = Concatenate(axis=3, name="Block8_2_Concatenate")(branches)
1384
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_2_Conv2d_1x1")(
1385
+ mixed
1386
+ )
1387
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
1388
+ x = add([x, up])
1389
+ x = Activation("relu", name="Block8_2_Activation")(x)
1390
+
1391
+ branch_0 = Conv2D(
1392
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_3_Branch_0_Conv2d_1x1"
1393
+ )(x)
1394
+ branch_0 = BatchNormalization(
1395
+ axis=3,
1396
+ momentum=0.995,
1397
+ epsilon=0.001,
1398
+ scale=False,
1399
+ name="Block8_3_Branch_0_Conv2d_1x1_BatchNorm",
1400
+ )(branch_0)
1401
+ branch_0 = Activation("relu", name="Block8_3_Branch_0_Conv2d_1x1_Activation")(branch_0)
1402
+ branch_1 = Conv2D(
1403
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_3_Branch_3_Conv2d_0a_1x1"
1404
+ )(x)
1405
+ branch_1 = BatchNormalization(
1406
+ axis=3,
1407
+ momentum=0.995,
1408
+ epsilon=0.001,
1409
+ scale=False,
1410
+ name="Block8_3_Branch_3_Conv2d_0a_1x1_BatchNorm",
1411
+ )(branch_1)
1412
+ branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0a_1x1_Activation")(branch_1)
1413
+ branch_1 = Conv2D(
1414
+ 192,
1415
+ [1, 3],
1416
+ strides=1,
1417
+ padding="same",
1418
+ use_bias=False,
1419
+ name="Block8_3_Branch_3_Conv2d_0b_1x3",
1420
+ )(branch_1)
1421
+ branch_1 = BatchNormalization(
1422
+ axis=3,
1423
+ momentum=0.995,
1424
+ epsilon=0.001,
1425
+ scale=False,
1426
+ name="Block8_3_Branch_3_Conv2d_0b_1x3_BatchNorm",
1427
+ )(branch_1)
1428
+ branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0b_1x3_Activation")(branch_1)
1429
+ branch_1 = Conv2D(
1430
+ 192,
1431
+ [3, 1],
1432
+ strides=1,
1433
+ padding="same",
1434
+ use_bias=False,
1435
+ name="Block8_3_Branch_3_Conv2d_0c_3x1",
1436
+ )(branch_1)
1437
+ branch_1 = BatchNormalization(
1438
+ axis=3,
1439
+ momentum=0.995,
1440
+ epsilon=0.001,
1441
+ scale=False,
1442
+ name="Block8_3_Branch_3_Conv2d_0c_3x1_BatchNorm",
1443
+ )(branch_1)
1444
+ branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0c_3x1_Activation")(branch_1)
1445
+ branches = [branch_0, branch_1]
1446
+ mixed = Concatenate(axis=3, name="Block8_3_Concatenate")(branches)
1447
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_3_Conv2d_1x1")(
1448
+ mixed
1449
+ )
1450
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
1451
+ x = add([x, up])
1452
+ x = Activation("relu", name="Block8_3_Activation")(x)
1453
+
1454
+ branch_0 = Conv2D(
1455
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_4_Branch_0_Conv2d_1x1"
1456
+ )(x)
1457
+ branch_0 = BatchNormalization(
1458
+ axis=3,
1459
+ momentum=0.995,
1460
+ epsilon=0.001,
1461
+ scale=False,
1462
+ name="Block8_4_Branch_0_Conv2d_1x1_BatchNorm",
1463
+ )(branch_0)
1464
+ branch_0 = Activation("relu", name="Block8_4_Branch_0_Conv2d_1x1_Activation")(branch_0)
1465
+ branch_1 = Conv2D(
1466
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_4_Branch_4_Conv2d_0a_1x1"
1467
+ )(x)
1468
+ branch_1 = BatchNormalization(
1469
+ axis=3,
1470
+ momentum=0.995,
1471
+ epsilon=0.001,
1472
+ scale=False,
1473
+ name="Block8_4_Branch_4_Conv2d_0a_1x1_BatchNorm",
1474
+ )(branch_1)
1475
+ branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0a_1x1_Activation")(branch_1)
1476
+ branch_1 = Conv2D(
1477
+ 192,
1478
+ [1, 3],
1479
+ strides=1,
1480
+ padding="same",
1481
+ use_bias=False,
1482
+ name="Block8_4_Branch_4_Conv2d_0b_1x3",
1483
+ )(branch_1)
1484
+ branch_1 = BatchNormalization(
1485
+ axis=3,
1486
+ momentum=0.995,
1487
+ epsilon=0.001,
1488
+ scale=False,
1489
+ name="Block8_4_Branch_4_Conv2d_0b_1x3_BatchNorm",
1490
+ )(branch_1)
1491
+ branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0b_1x3_Activation")(branch_1)
1492
+ branch_1 = Conv2D(
1493
+ 192,
1494
+ [3, 1],
1495
+ strides=1,
1496
+ padding="same",
1497
+ use_bias=False,
1498
+ name="Block8_4_Branch_4_Conv2d_0c_3x1",
1499
+ )(branch_1)
1500
+ branch_1 = BatchNormalization(
1501
+ axis=3,
1502
+ momentum=0.995,
1503
+ epsilon=0.001,
1504
+ scale=False,
1505
+ name="Block8_4_Branch_4_Conv2d_0c_3x1_BatchNorm",
1506
+ )(branch_1)
1507
+ branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0c_3x1_Activation")(branch_1)
1508
+ branches = [branch_0, branch_1]
1509
+ mixed = Concatenate(axis=3, name="Block8_4_Concatenate")(branches)
1510
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_4_Conv2d_1x1")(
1511
+ mixed
1512
+ )
1513
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
1514
+ x = add([x, up])
1515
+ x = Activation("relu", name="Block8_4_Activation")(x)
1516
+
1517
+ branch_0 = Conv2D(
1518
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_5_Branch_0_Conv2d_1x1"
1519
+ )(x)
1520
+ branch_0 = BatchNormalization(
1521
+ axis=3,
1522
+ momentum=0.995,
1523
+ epsilon=0.001,
1524
+ scale=False,
1525
+ name="Block8_5_Branch_0_Conv2d_1x1_BatchNorm",
1526
+ )(branch_0)
1527
+ branch_0 = Activation("relu", name="Block8_5_Branch_0_Conv2d_1x1_Activation")(branch_0)
1528
+ branch_1 = Conv2D(
1529
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_5_Branch_5_Conv2d_0a_1x1"
1530
+ )(x)
1531
+ branch_1 = BatchNormalization(
1532
+ axis=3,
1533
+ momentum=0.995,
1534
+ epsilon=0.001,
1535
+ scale=False,
1536
+ name="Block8_5_Branch_5_Conv2d_0a_1x1_BatchNorm",
1537
+ )(branch_1)
1538
+ branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0a_1x1_Activation")(branch_1)
1539
+ branch_1 = Conv2D(
1540
+ 192,
1541
+ [1, 3],
1542
+ strides=1,
1543
+ padding="same",
1544
+ use_bias=False,
1545
+ name="Block8_5_Branch_5_Conv2d_0b_1x3",
1546
+ )(branch_1)
1547
+ branch_1 = BatchNormalization(
1548
+ axis=3,
1549
+ momentum=0.995,
1550
+ epsilon=0.001,
1551
+ scale=False,
1552
+ name="Block8_5_Branch_5_Conv2d_0b_1x3_BatchNorm",
1553
+ )(branch_1)
1554
+ branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0b_1x3_Activation")(branch_1)
1555
+ branch_1 = Conv2D(
1556
+ 192,
1557
+ [3, 1],
1558
+ strides=1,
1559
+ padding="same",
1560
+ use_bias=False,
1561
+ name="Block8_5_Branch_5_Conv2d_0c_3x1",
1562
+ )(branch_1)
1563
+ branch_1 = BatchNormalization(
1564
+ axis=3,
1565
+ momentum=0.995,
1566
+ epsilon=0.001,
1567
+ scale=False,
1568
+ name="Block8_5_Branch_5_Conv2d_0c_3x1_BatchNorm",
1569
+ )(branch_1)
1570
+ branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0c_3x1_Activation")(branch_1)
1571
+ branches = [branch_0, branch_1]
1572
+ mixed = Concatenate(axis=3, name="Block8_5_Concatenate")(branches)
1573
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_5_Conv2d_1x1")(
1574
+ mixed
1575
+ )
1576
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
1577
+ x = add([x, up])
1578
+ x = Activation("relu", name="Block8_5_Activation")(x)
1579
+
1580
+ branch_0 = Conv2D(
1581
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_6_Branch_0_Conv2d_1x1"
1582
+ )(x)
1583
+ branch_0 = BatchNormalization(
1584
+ axis=3,
1585
+ momentum=0.995,
1586
+ epsilon=0.001,
1587
+ scale=False,
1588
+ name="Block8_6_Branch_0_Conv2d_1x1_BatchNorm",
1589
+ )(branch_0)
1590
+ branch_0 = Activation("relu", name="Block8_6_Branch_0_Conv2d_1x1_Activation")(branch_0)
1591
+ branch_1 = Conv2D(
1592
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_6_Branch_1_Conv2d_0a_1x1"
1593
+ )(x)
1594
+ branch_1 = BatchNormalization(
1595
+ axis=3,
1596
+ momentum=0.995,
1597
+ epsilon=0.001,
1598
+ scale=False,
1599
+ name="Block8_6_Branch_1_Conv2d_0a_1x1_BatchNorm",
1600
+ )(branch_1)
1601
+ branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
1602
+ branch_1 = Conv2D(
1603
+ 192,
1604
+ [1, 3],
1605
+ strides=1,
1606
+ padding="same",
1607
+ use_bias=False,
1608
+ name="Block8_6_Branch_1_Conv2d_0b_1x3",
1609
+ )(branch_1)
1610
+ branch_1 = BatchNormalization(
1611
+ axis=3,
1612
+ momentum=0.995,
1613
+ epsilon=0.001,
1614
+ scale=False,
1615
+ name="Block8_6_Branch_1_Conv2d_0b_1x3_BatchNorm",
1616
+ )(branch_1)
1617
+ branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0b_1x3_Activation")(branch_1)
1618
+ branch_1 = Conv2D(
1619
+ 192,
1620
+ [3, 1],
1621
+ strides=1,
1622
+ padding="same",
1623
+ use_bias=False,
1624
+ name="Block8_6_Branch_1_Conv2d_0c_3x1",
1625
+ )(branch_1)
1626
+ branch_1 = BatchNormalization(
1627
+ axis=3,
1628
+ momentum=0.995,
1629
+ epsilon=0.001,
1630
+ scale=False,
1631
+ name="Block8_6_Branch_1_Conv2d_0c_3x1_BatchNorm",
1632
+ )(branch_1)
1633
+ branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0c_3x1_Activation")(branch_1)
1634
+ branches = [branch_0, branch_1]
1635
+ mixed = Concatenate(axis=3, name="Block8_6_Concatenate")(branches)
1636
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_6_Conv2d_1x1")(
1637
+ mixed
1638
+ )
1639
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 1})(up)
1640
+ x = add([x, up])
1641
+
1642
+ # Classification block
1643
+ x = GlobalAveragePooling2D(name="AvgPool")(x)
1644
+ x = Dropout(1.0 - 0.8, name="Dropout")(x)  # keep probability 0.8, i.e. dropout rate 0.2
1645
+ # Bottleneck
1646
+ x = Dense(dimension, use_bias=False, name="Bottleneck")(x)
1647
+ x = BatchNormalization(momentum=0.995, epsilon=0.001, scale=False, name="Bottleneck_BatchNorm")(
1648
+ x
1649
+ )
1650
+
1651
+ # Create model
1652
+ model = Model(inputs, x, name="inception_resnet_v1")
1653
+
1654
+ return model
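
Editorial note: the five Block8 units above are identical up to their name prefix, the residual scale (0.2 here, 1 for Block8_6) and the trailing ReLU (omitted in Block8_6). A hedged refactoring sketch follows; the helper name and signature are hypothetical and not part of this commit. It reuses the module's existing imports (Conv2D, BatchNormalization, Activation, Concatenate, Lambda, add, K, scaling) and reproduces the released weight names, in which the 1x3/3x1 branch index tracks the block index except for Block8_6, which uses Branch_1:

    def block8_unit(x, idx, branch_idx=None, scale=0.2, relu=True):
        # hypothetical helper, not in the commit
        p = f"Block8_{idx}"
        b = f"{p}_Branch_{idx if branch_idx is None else branch_idx}"

        def conv_bn_relu(t, filters, kernel, name):
            t = Conv2D(filters, kernel, strides=1, padding="same", use_bias=False, name=name)(t)
            t = BatchNormalization(
                axis=3, momentum=0.995, epsilon=0.001, scale=False, name=name + "_BatchNorm"
            )(t)
            return Activation("relu", name=name + "_Activation")(t)

        branch_0 = conv_bn_relu(x, 192, 1, f"{p}_Branch_0_Conv2d_1x1")
        branch_1 = conv_bn_relu(x, 192, 1, f"{b}_Conv2d_0a_1x1")
        branch_1 = conv_bn_relu(branch_1, 192, [1, 3], f"{b}_Conv2d_0b_1x3")
        branch_1 = conv_bn_relu(branch_1, 192, [3, 1], f"{b}_Conv2d_0c_3x1")
        mixed = Concatenate(axis=3, name=f"{p}_Concatenate")([branch_0, branch_1])
        up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name=f"{p}_Conv2d_1x1")(mixed)
        up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": scale})(up)
        x = add([x, up])
        return Activation("relu", name=f"{p}_Activation")(x) if relu else x

    # e.g. x = block8_unit(x, 2), then 3, 4, 5, and finally
    # x = block8_unit(x, 6, branch_idx=1, scale=1, relu=False)
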
1655
+
1656
+
1657
+ def load_facenet128d_model(
1658
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet_weights.h5",
1659
+ ) -> Model:
1660
+ """
1661
+ Construct FaceNet-128d model, download its pre-trained weights and load them
1662
+ Args:
1663
+ url (str): URL of the pre-trained weights to be downloaded
1664
+ Returns:
1665
+ model (Model)
1666
+ """
1667
+ model = InceptionResNetV1()
1668
+
1669
+ # -----------------------------------
1670
+
1671
+ home = folder_utils.get_deepface_home()
1672
+
1673
+ if not os.path.isfile(home + "/.deepface/weights/facenet_weights.h5"):
1674
+ logger.info("facenet_weights.h5 will be downloaded...")
1675
+
1676
+ output = home + "/.deepface/weights/facenet_weights.h5"
1677
+ gdown.download(url, output, quiet=False)
1678
+
1679
+ # -----------------------------------
1680
+
1681
+ model.load_weights(home + "/.deepface/weights/facenet_weights.h5")
1682
+
1683
+ # -----------------------------------
1684
+
1685
+ return model
1686
+
1687
+
1688
+ def load_facenet512d_model(
1689
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5",
1690
+ ) -> Model:
1691
+ """
1692
+ Construct FaceNet-512d model, download its weights and load
1693
+ Returns:
1694
+ model (Model)
1695
+ """
1696
+
1697
+ model = InceptionResNetV1(dimension=512)
1698
+
1699
+ # -------------------------
1700
+
1701
+ home = folder_utils.get_deepface_home()
1702
+
1703
+ if not os.path.isfile(home + "/.deepface/weights/facenet512_weights.h5"):
1704
+ logger.info("facenet512_weights.h5 will be downloaded...")
1705
+
1706
+ output = home + "/.deepface/weights/facenet512_weights.h5"
1707
+ gdown.download(url, output, quiet=False)
1708
+
1709
+ # -------------------------
1710
+
1711
+ model.load_weights(home + "/.deepface/weights/facenet512_weights.h5")
1712
+
1713
+ # -------------------------
1714
+
1715
+ return model
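
Editorial note: a minimal usage sketch for the two loaders above (illustration only; the random array stands in for a properly detected and aligned 160x160 face crop):

    import numpy as np

    model_128 = load_facenet128d_model()  # InceptionResNetV1 with the default 128-d bottleneck
    model_512 = load_facenet512d_model()  # same backbone built with dimension=512

    face = np.random.rand(1, 160, 160, 3).astype("float32")  # dummy pre-processed crop
    print(model_128.predict(face).shape)  # (1, 128)
    print(model_512.predict(face).shape)  # (1, 512)
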
basemodels/FbDeepFace.py ADDED
@@ -0,0 +1,105 @@
1
+ import os
2
+ import zipfile
3
+ import gdown
4
+ from deepface.commons import package_utils, folder_utils
5
+ from deepface.models.FacialRecognition import FacialRecognition
6
+ from deepface.commons import logger as log
7
+
8
+ logger = log.get_singletonish_logger()
9
+
10
+ # --------------------------------
11
+ # dependency configuration
12
+
13
+ tf_major = package_utils.get_tf_major_version()
14
+ tf_minor = package_utils.get_tf_minor_version()
15
+
16
+ if tf_major == 1:
17
+ from keras.models import Model, Sequential
18
+ from keras.layers import (
19
+ Convolution2D,
20
+ MaxPooling2D,
21
+ Flatten,
22
+ Dense,
23
+ Dropout,
24
+ )
25
+ else:
26
+ from tensorflow.keras.models import Model, Sequential
27
+ from tensorflow.keras.layers import (
28
+ Convolution2D,
29
+ MaxPooling2D,
30
+ Flatten,
31
+ Dense,
32
+ Dropout,
33
+ )
34
+
35
+
36
+ # -------------------------------------
37
+ # pylint: disable=line-too-long, too-few-public-methods
38
+ class DeepFaceClient(FacialRecognition):
39
+ """
40
+ Fb's DeepFace model class
41
+ """
42
+
43
+ def __init__(self):
44
+ # DeepFace requires tf 2.12 or less
45
+ if tf_major == 2 and tf_minor > 12:
46
+ # Ref: https://github.com/serengil/deepface/pull/1079
47
+ raise ValueError(
48
+ "DeepFace model requires LocallyConnected2D but it is no longer supported"
49
+ f" after tf 2.12 but you have {tf_major}.{tf_minor}. You need to downgrade your tf."
50
+ )
51
+
52
+ self.model = load_model()
53
+ self.model_name = "DeepFace"
54
+ self.input_shape = (152, 152)
55
+ self.output_shape = 4096
56
+
57
+
58
+ def load_model(
59
+ url="https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip",
60
+ ) -> Model:
61
+ """
62
+ Construct DeepFace model, download its weights and load
63
+ """
64
+ # we have some checks for this dependency in the init of client
65
+ # importing it at module level would trigger the dependency at library initialization
66
+ if tf_major == 1:
67
+ from keras.layers import LocallyConnected2D
68
+ else:
69
+ from tensorflow.keras.layers import LocallyConnected2D
70
+
71
+ base_model = Sequential()
72
+ base_model.add(
73
+ Convolution2D(32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3))
74
+ )
75
+ base_model.add(MaxPooling2D(pool_size=3, strides=2, padding="same", name="M2"))
76
+ base_model.add(Convolution2D(16, (9, 9), activation="relu", name="C3"))
77
+ base_model.add(LocallyConnected2D(16, (9, 9), activation="relu", name="L4"))
78
+ base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation="relu", name="L5"))
79
+ base_model.add(LocallyConnected2D(16, (5, 5), activation="relu", name="L6"))
80
+ base_model.add(Flatten(name="F0"))
81
+ base_model.add(Dense(4096, activation="relu", name="F7"))
82
+ base_model.add(Dropout(rate=0.5, name="D0"))
83
+ base_model.add(Dense(8631, activation="softmax", name="F8"))
84
+
85
+ # ---------------------------------
86
+
87
+ home = folder_utils.get_deepface_home()
88
+
89
+ if not os.path.isfile(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5"):
90
+ logger.info("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")
91
+
92
+ output = home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip"
93
+
94
+ gdown.download(url, output, quiet=False)
95
+
96
+ # unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
97
+ with zipfile.ZipFile(output, "r") as zip_ref:
98
+ zip_ref.extractall(home + "/.deepface/weights/")
99
+
100
+ base_model.load_weights(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5")
101
+
102
+ # drop F8 and D0. F7 is the representation layer.
103
+ deepface_model = Model(inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output)
104
+
105
+ return deepface_model
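
Editorial note: a minimal usage sketch for this client (illustration only; it assumes tensorflow <= 2.12, which __init__ enforces, and a pre-processed 152x152 crop):

    import numpy as np

    client = DeepFaceClient()                  # builds the network and loads the weights
    face = np.random.rand(1, 152, 152, 3)      # dummy input in the expected shape
    embedding = client.model.predict(face)[0]  # 4096-d output of the F7 representation layer
    print(embedding.shape)                     # (4096,)
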
basemodels/GhostFaceNet.py ADDED
@@ -0,0 +1,312 @@
1
+ # built-in dependencies
2
+ import os
3
+
4
+ # 3rd party dependencies
5
+ import gdown
6
+ import tensorflow as tf
7
+
8
+ # project dependencies
9
+ from deepface.commons import package_utils, folder_utils
10
+ from deepface.models.FacialRecognition import FacialRecognition
11
+ from deepface.commons import logger as log
12
+
13
+ logger = log.get_singletonish_logger()
14
+
15
+ tf_major = package_utils.get_tf_major_version()
16
+ if tf_major == 1:
17
+ import keras
18
+ from keras import backend as K
19
+ from keras.models import Model
20
+ from keras.layers import (
21
+ Activation,
22
+ Add,
23
+ BatchNormalization,
24
+ Concatenate,
25
+ Conv2D,
26
+ DepthwiseConv2D,
27
+ GlobalAveragePooling2D,
28
+ Input,
29
+ Reshape,
30
+ Multiply,
31
+ ReLU,
32
+ PReLU,
33
+ )
34
+ else:
35
+ from tensorflow import keras
36
+ from tensorflow.keras import backend as K
37
+ from tensorflow.keras.models import Model
38
+ from tensorflow.keras.layers import (
39
+ Activation,
40
+ Add,
41
+ BatchNormalization,
42
+ Concatenate,
43
+ Conv2D,
44
+ DepthwiseConv2D,
45
+ GlobalAveragePooling2D,
46
+ Input,
47
+ Reshape,
48
+ Multiply,
49
+ ReLU,
50
+ PReLU,
51
+ )
52
+
53
+
54
+ # pylint: disable=line-too-long, too-few-public-methods, no-else-return, unsubscriptable-object, comparison-with-callable
55
+ PRETRAINED_WEIGHTS = "https://github.com/HamadYA/GhostFaceNets/releases/download/v1.2/GhostFaceNet_W1.3_S1_ArcFace.h5"
56
+
57
+
58
+ class GhostFaceNetClient(FacialRecognition):
59
+ """
60
+ GhostFaceNet model (GhostFaceNetV1 backbone)
61
+ Repo: https://github.com/HamadYA/GhostFaceNets
62
+ Pre-trained weights: https://github.com/HamadYA/GhostFaceNets/releases/tag/v1.2
63
+ GhostFaceNet_W1.3_S1_ArcFace.h5 ~ 16.5MB
64
+ The author reports that this backbone with these pre-trained weights reaches 99.7667% accuracy on LFW
65
+ """
66
+
67
+ def __init__(self):
68
+ self.model_name = "GhostFaceNet"
69
+ self.input_shape = (112, 112)
70
+ self.output_shape = 512
71
+ self.model = load_model()
72
+
73
+
74
+ def load_model():
75
+ model = GhostFaceNetV1()
76
+
77
+ home = folder_utils.get_deepface_home()
78
+ output = home + "/.deepface/weights/ghostfacenet_v1.h5"
79
+
80
+ if not os.path.isfile(output):
81
+ logger.info(f"Pre-trained weights is downloaded from {PRETRAINED_WEIGHTS} to {output}")
82
+ gdown.download(PRETRAINED_WEIGHTS, output, quiet=False)
83
+ logger.info(f"Pre-trained weights is just downloaded to {output}")
84
+
85
+ model.load_weights(output)
86
+
87
+ return model
88
+
89
+
90
+ def GhostFaceNetV1() -> Model:
91
+ """
92
+ Build GhostFaceNetV1 model. Refactored from
93
+ github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
94
+ Returns:
95
+ model (Model)
96
+ """
97
+ inputs = Input(shape=(112, 112, 3))
98
+
99
+ out_channel = 20
100
+
101
+ nn = Conv2D(
102
+ out_channel,
103
+ (3, 3),
104
+ strides=1,
105
+ padding="same",
106
+ use_bias=False,
107
+ kernel_initializer=keras.initializers.VarianceScaling(
108
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
109
+ ),
110
+ )(inputs)
111
+
112
+ nn = BatchNormalization(axis=-1)(nn)
113
+ nn = Activation("relu")(nn)
114
+
115
+ dwkernels = [3, 3, 3, 5, 5, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5]
116
+ exps = [20, 64, 92, 92, 156, 312, 260, 240, 240, 624, 872, 872, 1248, 1248, 1248, 664]
117
+ outs = [20, 32, 32, 52, 52, 104, 104, 104, 104, 144, 144, 208, 208, 208, 208, 208]
118
+ strides_set = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1]
119
+ reductions = [0, 0, 0, 24, 40, 0, 0, 0, 0, 156, 220, 220, 0, 312, 0, 168]
120
+
121
+ pre_out = out_channel
122
+ for dwk, stride, exp, out, reduction in zip(dwkernels, strides_set, exps, outs, reductions):
123
+ shortcut = not (out == pre_out and stride == 1)
124
+ nn = ghost_bottleneck(nn, dwk, stride, exp, out, reduction, shortcut)
125
+ pre_out = out
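
Editorial note: each zipped row above configures one bottleneck, and a projection shortcut is built exactly when the block changes resolution or width. A small sketch of that rule, using out_channel, strides_set and outs as defined above:

    pre = out_channel  # 20, the stem width
    for i, (stride, out) in enumerate(zip(strides_set, outs)):
        projection = not (out == pre and stride == 1)  # mirrors the shortcut flag
        print(f"bottleneck {i}: {pre} -> {out}, stride {stride}, "
              f"{'projection' if projection else 'identity'} shortcut")
        pre = out
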
126
+
127
+ nn = Conv2D(
128
+ 664,
129
+ (1, 1),
130
+ strides=(1, 1),
131
+ padding="valid",
132
+ use_bias=False,
133
+ kernel_initializer=keras.initializers.VarianceScaling(
134
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
135
+ ),
136
+ )(nn)
137
+ nn = BatchNormalization(axis=-1)(nn)
138
+ nn = Activation("relu")(nn)
139
+
140
+ xx = Model(inputs=inputs, outputs=nn, name="GhostFaceNetV1")
141
+
142
+ # post modelling
143
+ inputs = xx.inputs[0]
144
+ nn = xx.outputs[0]
145
+
146
+ nn = keras.layers.DepthwiseConv2D(nn.shape[1], use_bias=False, name="GDC_dw")(nn)
147
+ nn = keras.layers.BatchNormalization(momentum=0.99, epsilon=0.001, name="GDC_batchnorm")(nn)
148
+ nn = keras.layers.Conv2D(
149
+ 512, 1, use_bias=True, kernel_initializer="glorot_normal", name="GDC_conv"
150
+ )(nn)
151
+ nn = keras.layers.Flatten(name="GDC_flatten")(nn)
152
+
153
+ embedding = keras.layers.BatchNormalization(
154
+ momentum=0.99, epsilon=0.001, scale=True, name="pre_embedding"
155
+ )(nn)
156
+ embedding_fp32 = keras.layers.Activation("linear", dtype="float32", name="embedding")(embedding)
157
+
158
+ model = keras.models.Model(inputs, embedding_fp32, name=xx.name)
159
+ model = replace_relu_with_prelu(model=model)
160
+ return model
161
+
162
+
163
+ def se_module(inputs, reduction):
164
+ """
165
+ Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
166
+ """
167
+ # get the channel axis
168
+ channel_axis = 1 if K.image_data_format() == "channels_first" else -1
169
+ # filters = channel axis shape
170
+ filters = inputs.shape[channel_axis]
171
+
172
+ # from None x H x W x C to None x C
173
+ se = GlobalAveragePooling2D()(inputs)
174
+
175
+ # Reshape None x C to None 1 x 1 x C
176
+ se = Reshape((1, 1, filters))(se)
177
+
178
+ # Squeeze by using C*se_ratio. The size will be 1 x 1 x C*se_ratio
179
+ se = Conv2D(
180
+ reduction,
181
+ kernel_size=1,
182
+ use_bias=True,
183
+ kernel_initializer=keras.initializers.VarianceScaling(
184
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
185
+ ),
186
+ )(se)
187
+ se = Activation("relu")(se)
188
+
189
+ # Excitation using C filters. The size will be 1 x 1 x C
190
+ se = Conv2D(
191
+ filters,
192
+ kernel_size=1,
193
+ use_bias=True,
194
+ kernel_initializer=keras.initializers.VarianceScaling(
195
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
196
+ ),
197
+ )(se)
198
+ se = Activation("hard_sigmoid")(se)
199
+
200
+ return Multiply()([inputs, se])
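
Editorial note: a worked shape trace of the squeeze-and-excite path above, for the fourth bottleneck's illustrative (None, 28, 28, 92) input with reduction=24 (channels-last assumed):

    # GlobalAveragePooling2D: (None, 28, 28, 92) -> (None, 92)          squeeze spatial dims
    # Reshape:                (None, 92)         -> (None, 1, 1, 92)
    # Conv2D(24, 1) + relu:   (None, 1, 1, 92)   -> (None, 1, 1, 24)    reduction
    # Conv2D(92, 1) + hard_sigmoid: (None, 1, 1, 24) -> (None, 1, 1, 92)  per-channel gates
    # Multiply: broadcasts the gates over H x W, re-weighting each input channel
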
201
+
202
+
203
+ def ghost_module(inputs, out, convkernel=1, dwkernel=3, add_activation=True):
204
+ """
205
+ Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
206
+ """
207
+ conv_out_channel = out // 2
208
+ cc = Conv2D(
209
+ conv_out_channel,
210
+ convkernel,
211
+ use_bias=False,
212
+ strides=(1, 1),
213
+ padding="same",
214
+ kernel_initializer=keras.initializers.VarianceScaling(
215
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
216
+ ),
217
+ )(inputs)
218
+ cc = BatchNormalization(axis=-1)(cc)
219
+ if add_activation:
220
+ cc = Activation("relu")(cc)
221
+
222
+ nn = DepthwiseConv2D(
223
+ dwkernel,
224
+ 1,
225
+ padding="same",
226
+ use_bias=False,
227
+ depthwise_initializer=keras.initializers.VarianceScaling(
228
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
229
+ ),
230
+ )(cc)
231
+ nn = BatchNormalization(axis=-1)(nn)
232
+ if add_activation:
233
+ nn = Activation("relu")(nn)
234
+ return Concatenate()([cc, nn])
235
+
236
+
237
+ def ghost_bottleneck(inputs, dwkernel, strides, exp, out, reduction, shortcut=True):
238
+ """
239
+ Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
240
+ """
241
+ nn = ghost_module(inputs, exp, add_activation=True)
242
+ if strides > 1:
243
+ # Extra depth conv if strides higher than 1
244
+ nn = DepthwiseConv2D(
245
+ dwkernel,
246
+ strides,
247
+ padding="same",
248
+ use_bias=False,
249
+ depthwise_initializer=keras.initializers.VarianceScaling(
250
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
251
+ ),
252
+ )(nn)
253
+ nn = BatchNormalization(axis=-1)(nn)
254
+
255
+ if reduction > 0:
256
+ # Squeeze and excite
257
+ nn = se_module(nn, reduction)
258
+
259
+ # Point-wise linear projection
260
+ nn = ghost_module(nn, out, add_activation=False) # ghost2 = GhostModule(exp, out, relu=False)
261
+
262
+ if shortcut:
263
+ xx = DepthwiseConv2D(
264
+ dwkernel,
265
+ strides,
266
+ padding="same",
267
+ use_bias=False,
268
+ depthwise_initializer=keras.initializers.VarianceScaling(
269
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
270
+ ),
271
+ )(inputs)
272
+ xx = BatchNormalization(axis=-1)(xx)
273
+ xx = Conv2D(
274
+ out,
275
+ (1, 1),
276
+ strides=(1, 1),
277
+ padding="valid",
278
+ use_bias=False,
279
+ kernel_initializer=keras.initializers.VarianceScaling(
280
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
281
+ ),
282
+ )(xx)
283
+ xx = BatchNormalization(axis=-1)(xx)
284
+ else:
285
+ xx = inputs
286
+ return Add()([xx, nn])
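
Editorial note: channel bookkeeping for ghost_module as used above: the primary point-wise convolution produces out // 2 channels and the depthwise convolution derives the other out // 2 "ghost" features, so the concatenation returns exactly out channels (every entry of exps and outs is even). A tiny check:

    out = 52                 # e.g. the fourth bottleneck's width
    cc_channels = out // 2   # primary point-wise convolution
    nn_channels = out // 2   # cheap depthwise "ghost" features
    assert cc_channels + nn_channels == out
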
287
+
288
+
289
+ def replace_relu_with_prelu(model) -> Model:
290
+ """
291
+ Replaces relu activation function in the built model with prelu.
292
+ Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
293
+ Args:
294
+ model (Model): built model with relu activation functions
295
+ Returns
296
+ model (Model): built model with prelu activation functions
297
+ """
298
+
299
+ def convert_relu(layer):
300
+ if isinstance(layer, ReLU) or (
301
+ isinstance(layer, Activation) and layer.activation == keras.activations.relu
302
+ ):
303
+ layer_name = layer.name.replace("_relu", "_prelu")
304
+ return PReLU(
305
+ shared_axes=[1, 2],
306
+ alpha_initializer=tf.initializers.Constant(0.25),
307
+ name=layer_name,
308
+ )
309
+ return layer
310
+
311
+ input_tensors = keras.layers.Input(model.input_shape[1:])
312
+ return keras.models.clone_model(model, input_tensors=input_tensors, clone_function=convert_relu)
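
Editorial note: a minimal usage sketch for the client above (illustration only; a real input would be a detected, aligned 112x112 face crop):

    import numpy as np

    client = GhostFaceNetClient()
    face = np.random.rand(1, 112, 112, 3).astype("float32")
    embedding = client.model(face, training=False).numpy()[0]
    print(embedding.shape)  # (512,)
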
basemodels/OpenFace.py ADDED
@@ -0,0 +1,397 @@
1
+ import os
2
+ import gdown
3
+ import tensorflow as tf
4
+ from deepface.commons import package_utils, folder_utils
5
+ from deepface.models.FacialRecognition import FacialRecognition
6
+ from deepface.commons import logger as log
7
+
8
+ logger = log.get_singletonish_logger()
9
+
10
+ tf_version = package_utils.get_tf_major_version()
11
+ if tf_version == 1:
12
+ from keras.models import Model
13
+ from keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
14
+ from keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
15
+ from keras.layers import MaxPooling2D, AveragePooling2D
16
+ from keras import backend as K
17
+ else:
18
+ from tensorflow.keras.models import Model
19
+ from tensorflow.keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
20
+ from tensorflow.keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
21
+ from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
22
+ from tensorflow.keras import backend as K
23
+
24
+ # pylint: disable=unnecessary-lambda
25
+
26
+ # ---------------------------------------
27
+
28
+ # pylint: disable=too-few-public-methods
29
+ class OpenFaceClient(FacialRecognition):
30
+ """
31
+ OpenFace model class
32
+ """
33
+
34
+ def __init__(self):
35
+ self.model = load_model()
36
+ self.model_name = "OpenFace"
37
+ self.input_shape = (96, 96)
38
+ self.output_shape = 128
39
+
40
+
41
+ def load_model(
42
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/openface_weights.h5",
43
+ ) -> Model:
44
+ """
45
+ Construct OpenFace model, download its weights and load
46
+ Returns:
47
+ model (Model)
48
+ """
49
+ myInput = Input(shape=(96, 96, 3))
50
+
51
+ x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
52
+ x = Conv2D(64, (7, 7), strides=(2, 2), name="conv1")(x)
53
+ x = BatchNormalization(axis=3, epsilon=0.00001, name="bn1")(x)
54
+ x = Activation("relu")(x)
55
+ x = ZeroPadding2D(padding=(1, 1))(x)
56
+ x = MaxPooling2D(pool_size=3, strides=2)(x)
57
+ x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_1")(x)
58
+ x = Conv2D(64, (1, 1), name="conv2")(x)
59
+ x = BatchNormalization(axis=3, epsilon=0.00001, name="bn2")(x)
60
+ x = Activation("relu")(x)
61
+ x = ZeroPadding2D(padding=(1, 1))(x)
62
+ x = Conv2D(192, (3, 3), name="conv3")(x)
63
+ x = BatchNormalization(axis=3, epsilon=0.00001, name="bn3")(x)
64
+ x = Activation("relu")(x)
65
+ x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_2")(x)  # local response normalization
66
+ x = ZeroPadding2D(padding=(1, 1))(x)
67
+ x = MaxPooling2D(pool_size=3, strides=2)(x)
68
+
69
+ # Inception3a
70
+ inception_3a_3x3 = Conv2D(96, (1, 1), name="inception_3a_3x3_conv1")(x)
71
+ inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn1")(
72
+ inception_3a_3x3
73
+ )
74
+ inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
75
+ inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
76
+ inception_3a_3x3 = Conv2D(128, (3, 3), name="inception_3a_3x3_conv2")(inception_3a_3x3)
77
+ inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn2")(
78
+ inception_3a_3x3
79
+ )
80
+ inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
81
+
82
+ inception_3a_5x5 = Conv2D(16, (1, 1), name="inception_3a_5x5_conv1")(x)
83
+ inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn1")(
84
+ inception_3a_5x5
85
+ )
86
+ inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
87
+ inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
88
+ inception_3a_5x5 = Conv2D(32, (5, 5), name="inception_3a_5x5_conv2")(inception_3a_5x5)
89
+ inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn2")(
90
+ inception_3a_5x5
91
+ )
92
+ inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
93
+
94
+ inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
95
+ inception_3a_pool = Conv2D(32, (1, 1), name="inception_3a_pool_conv")(inception_3a_pool)
96
+ inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_pool_bn")(
97
+ inception_3a_pool
98
+ )
99
+ inception_3a_pool = Activation("relu")(inception_3a_pool)
100
+ inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)
101
+
102
+ inception_3a_1x1 = Conv2D(64, (1, 1), name="inception_3a_1x1_conv")(x)
103
+ inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_1x1_bn")(
104
+ inception_3a_1x1
105
+ )
106
+ inception_3a_1x1 = Activation("relu")(inception_3a_1x1)
107
+
108
+ inception_3a = concatenate(
109
+ [inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3
110
+ )
111
+
112
+ # Inception3b
113
+ inception_3b_3x3 = Conv2D(96, (1, 1), name="inception_3b_3x3_conv1")(inception_3a)
114
+ inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn1")(
115
+ inception_3b_3x3
116
+ )
117
+ inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
118
+ inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
119
+ inception_3b_3x3 = Conv2D(128, (3, 3), name="inception_3b_3x3_conv2")(inception_3b_3x3)
120
+ inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn2")(
121
+ inception_3b_3x3
122
+ )
123
+ inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
124
+
125
+ inception_3b_5x5 = Conv2D(32, (1, 1), name="inception_3b_5x5_conv1")(inception_3a)
126
+ inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn1")(
127
+ inception_3b_5x5
128
+ )
129
+ inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
130
+ inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
131
+ inception_3b_5x5 = Conv2D(64, (5, 5), name="inception_3b_5x5_conv2")(inception_3b_5x5)
132
+ inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn2")(
133
+ inception_3b_5x5
134
+ )
135
+ inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
136
+
137
+ inception_3b_pool = Lambda(lambda x: x**2, name="power2_3b")(inception_3a)
138
+ inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3b_pool)
139
+ inception_3b_pool = Lambda(lambda x: x * 9, name="mult9_3b")(inception_3b_pool)
140
+ inception_3b_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_3b")(inception_3b_pool)
141
+ inception_3b_pool = Conv2D(64, (1, 1), name="inception_3b_pool_conv")(inception_3b_pool)
142
+ inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_pool_bn")(
143
+ inception_3b_pool
144
+ )
145
+ inception_3b_pool = Activation("relu")(inception_3b_pool)
146
+ inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)
147
+
148
+ inception_3b_1x1 = Conv2D(64, (1, 1), name="inception_3b_1x1_conv")(inception_3a)
149
+ inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_1x1_bn")(
150
+ inception_3b_1x1
151
+ )
152
+ inception_3b_1x1 = Activation("relu")(inception_3b_1x1)
153
+
154
+ inception_3b = concatenate(
155
+ [inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3
156
+ )
157
+
158
+ # Inception3c
159
+ inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name="inception_3c_3x3_conv1")(
160
+ inception_3b
161
+ )
162
+ inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_3x3_bn1")(
163
+ inception_3c_3x3
164
+ )
165
+ inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
166
+ inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
167
+ inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_3c_3x3_conv2")(
168
+ inception_3c_3x3
169
+ )
170
+ inception_3c_3x3 = BatchNormalization(
171
+ axis=3, epsilon=0.00001, name="inception_3c_3x3_bn2"
172
+ )(inception_3c_3x3)
173
+ inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
174
+
175
+ inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_3c_5x5_conv1")(
176
+ inception_3b
177
+ )
178
+ inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_5x5_bn1")(
179
+ inception_3c_5x5
180
+ )
181
+ inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
182
+ inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
183
+ inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name="inception_3c_5x5_conv2")(
184
+ inception_3c_5x5
185
+ )
186
+ inception_3c_5x5 = BatchNormalization(
187
+ axis=3, epsilon=0.00001, name="inception_3c_5x5_bn2"
188
+ )(inception_3c_5x5)
189
+ inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
190
+
191
+ inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
192
+ inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)
193
+
194
+ inception_3c = concatenate([inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)
195
+
196
+ # inception 4a
197
+ inception_4a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_4a_3x3_conv1")(
198
+ inception_3c
199
+ )
200
+ inception_4a_3x3 = BatchNormalization(
201
+ axis=3, epsilon=0.00001, name="inception_4a_3x3_bn1"
202
+ )(inception_4a_3x3)
203
+ inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
204
+ inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
205
+ inception_4a_3x3 = Conv2D(192, (3, 3), strides=(1, 1), name="inception_4a_3x3_conv2")(
206
+ inception_4a_3x3
207
+ )
208
+ inception_4a_3x3 = BatchNormalization(
209
+ axis=3, epsilon=0.00001, name="inception_4a_3x3_bn2"
210
+ )(inception_4a_3x3)
211
+ inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
212
+
213
+ inception_4a_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_4a_5x5_conv1")(
214
+ inception_3c
215
+ )
216
+ inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_5x5_bn1")(
217
+ inception_4a_5x5
218
+ )
219
+ inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
220
+ inception_4a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4a_5x5)
221
+ inception_4a_5x5 = Conv2D(64, (5, 5), strides=(1, 1), name="inception_4a_5x5_conv2")(
222
+ inception_4a_5x5
223
+ )
224
+ inception_4a_5x5 = BatchNormalization(
225
+ axis=3, epsilon=0.00001, name="inception_4a_5x5_bn2"
226
+ )(inception_4a_5x5)
227
+ inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
228
+
229
+ inception_4a_pool = Lambda(lambda x: x**2, name="power2_4a")(inception_3c)
230
+ inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_4a_pool)
231
+ inception_4a_pool = Lambda(lambda x: x * 9, name="mult9_4a")(inception_4a_pool)
232
+ inception_4a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_4a")(inception_4a_pool)
233
+
234
+ inception_4a_pool = Conv2D(128, (1, 1), strides=(1, 1), name="inception_4a_pool_conv")(
235
+ inception_4a_pool
236
+ )
237
+ inception_4a_pool = BatchNormalization(
238
+ axis=3, epsilon=0.00001, name="inception_4a_pool_bn"
239
+ )(inception_4a_pool)
240
+ inception_4a_pool = Activation("relu")(inception_4a_pool)
241
+ inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)
242
+
243
+ inception_4a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_4a_1x1_conv")(
244
+ inception_3c
245
+ )
246
+ inception_4a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_1x1_bn")(
247
+ inception_4a_1x1
248
+ )
249
+ inception_4a_1x1 = Activation("relu")(inception_4a_1x1)
250
+
251
+ inception_4a = concatenate(
252
+ [inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3
253
+ )
254
+
255
+ # inception4e
256
+ inception_4e_3x3 = Conv2D(160, (1, 1), strides=(1, 1), name="inception_4e_3x3_conv1")(
257
+ inception_4a
258
+ )
259
+ inception_4e_3x3 = BatchNormalization(
260
+ axis=3, epsilon=0.00001, name="inception_4e_3x3_bn1"
261
+ )(inception_4e_3x3)
262
+ inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
263
+ inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
264
+ inception_4e_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_4e_3x3_conv2")(
265
+ inception_4e_3x3
266
+ )
267
+ inception_4e_3x3 = BatchNormalization(
268
+ axis=3, epsilon=0.00001, name="inception_4e_3x3_bn2"
269
+ )(inception_4e_3x3)
270
+ inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
271
+
272
+ inception_4e_5x5 = Conv2D(64, (1, 1), strides=(1, 1), name="inception_4e_5x5_conv1")(
273
+ inception_4a
274
+ )
275
+ inception_4e_5x5 = BatchNormalization(
276
+ axis=3, epsilon=0.00001, name="inception_4e_5x5_bn1"
277
+ )(inception_4e_5x5)
278
+ inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
279
+ inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
280
+ inception_4e_5x5 = Conv2D(128, (5, 5), strides=(2, 2), name="inception_4e_5x5_conv2")(
281
+ inception_4e_5x5
282
+ )
283
+ inception_4e_5x5 = BatchNormalization(
284
+ axis=3, epsilon=0.00001, name="inception_4e_5x5_bn2"
285
+ )(inception_4e_5x5)
286
+ inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
287
+
288
+ inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
289
+ inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)
290
+
291
+ inception_4e = concatenate([inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)
292
+
293
+ # inception5a
294
+ inception_5a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_3x3_conv1")(
295
+ inception_4e
296
+ )
297
+ inception_5a_3x3 = BatchNormalization(
298
+ axis=3, epsilon=0.00001, name="inception_5a_3x3_bn1"
299
+ )(inception_5a_3x3)
300
+ inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
301
+ inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
302
+ inception_5a_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5a_3x3_conv2")(
303
+ inception_5a_3x3
304
+ )
305
+ inception_5a_3x3 = BatchNormalization(
306
+ axis=3, epsilon=0.00001, name="inception_5a_3x3_bn2"
307
+ )(inception_5a_3x3)
308
+ inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
309
+
310
+ inception_5a_pool = Lambda(lambda x: x**2, name="power2_5a")(inception_4e)
311
+ inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_5a_pool)
312
+ inception_5a_pool = Lambda(lambda x: x * 9, name="mult9_5a")(inception_5a_pool)
313
+ inception_5a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_5a")(inception_5a_pool)
314
+
315
+ inception_5a_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_pool_conv")(
316
+ inception_5a_pool
317
+ )
318
+ inception_5a_pool = BatchNormalization(
319
+ axis=3, epsilon=0.00001, name="inception_5a_pool_bn"
320
+ )(inception_5a_pool)
321
+ inception_5a_pool = Activation("relu")(inception_5a_pool)
322
+ inception_5a_pool = ZeroPadding2D(padding=(1, 1))(inception_5a_pool)
323
+
324
+ inception_5a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5a_1x1_conv")(
325
+ inception_4e
326
+ )
327
+ inception_5a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5a_1x1_bn")(
328
+ inception_5a_1x1
329
+ )
330
+ inception_5a_1x1 = Activation("relu")(inception_5a_1x1)
331
+
332
+ inception_5a = concatenate([inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)
333
+
334
+ # inception_5b
335
+ inception_5b_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_3x3_conv1")(
336
+ inception_5a
337
+ )
338
+ inception_5b_3x3 = BatchNormalization(
339
+ axis=3, epsilon=0.00001, name="inception_5b_3x3_bn1"
340
+ )(inception_5b_3x3)
341
+ inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
342
+ inception_5b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5b_3x3)
343
+ inception_5b_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5b_3x3_conv2")(
344
+ inception_5b_3x3
345
+ )
346
+ inception_5b_3x3 = BatchNormalization(
347
+ axis=3, epsilon=0.00001, name="inception_5b_3x3_bn2"
348
+ )(inception_5b_3x3)
349
+ inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
350
+
351
+ inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
352
+
353
+ inception_5b_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_pool_conv")(
354
+ inception_5b_pool
355
+ )
356
+ inception_5b_pool = BatchNormalization(
357
+ axis=3, epsilon=0.00001, name="inception_5b_pool_bn"
358
+ )(inception_5b_pool)
359
+ inception_5b_pool = Activation("relu")(inception_5b_pool)
360
+
361
+ inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)
362
+
363
+ inception_5b_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5b_1x1_conv")(
364
+ inception_5a
365
+ )
366
+ inception_5b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5b_1x1_bn")(
367
+ inception_5b_1x1
368
+ )
369
+ inception_5b_1x1 = Activation("relu")(inception_5b_1x1)
370
+
371
+ inception_5b = concatenate([inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)
372
+
373
+ av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
374
+ reshape_layer = Flatten()(av_pool)
375
+ dense_layer = Dense(128, name="dense_layer")(reshape_layer)
376
+ norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(dense_layer)
377
+
378
+ # Final Model
379
+ model = Model(inputs=[myInput], outputs=norm_layer)
380
+
381
+ # -----------------------------------
382
+
383
+ home = folder_utils.get_deepface_home()
384
+
385
+ if not os.path.isfile(home + "/.deepface/weights/openface_weights.h5"):
386
+ logger.info("openface_weights.h5 will be downloaded...")
387
+
388
+ output = home + "/.deepface/weights/openface_weights.h5"
389
+ gdown.download(url, output, quiet=False)
390
+
391
+ # -----------------------------------
392
+
393
+ model.load_weights(home + "/.deepface/weights/openface_weights.h5")
394
+
395
+ # -----------------------------------
396
+
397
+ return model
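
Editorial note: a minimal usage sketch. Because of the final norm_layer, OpenFace embeddings come out with (approximately) unit L2 norm:

    import numpy as np

    client = OpenFaceClient()
    face = np.random.rand(1, 96, 96, 3).astype("float32")
    embedding = np.array(client.model.predict(face)[0])
    print(embedding.shape, np.linalg.norm(embedding))  # (128,) ~1.0
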
basemodels/SFace.py ADDED
@@ -0,0 +1,87 @@
1
+ # built-in dependencies
2
+ import os
3
+ from typing import Any, List
4
+
5
+ # 3rd party dependencies
6
+ import numpy as np
7
+ import cv2 as cv
8
+ import gdown
9
+
10
+ # project dependencies
11
+ from deepface.commons import folder_utils
12
+ from deepface.models.FacialRecognition import FacialRecognition
13
+ from deepface.commons import logger as log
14
+
15
+ logger = log.get_singletonish_logger()
16
+
17
+ # pylint: disable=line-too-long, too-few-public-methods
18
+
19
+
20
+ class SFaceClient(FacialRecognition):
21
+ """
22
+ SFace model class
23
+ """
24
+
25
+ def __init__(self):
26
+ self.model = load_model()
27
+ self.model_name = "SFace"
28
+ self.input_shape = (112, 112)
29
+ self.output_shape = 128
30
+
31
+ def forward(self, img: np.ndarray) -> List[float]:
32
+ """
33
+ Find embeddings with SFace model
34
+ This model necessitates the override of the forward method
35
+ because it is not a keras model.
36
+ Args:
37
+ img (np.ndarray): pre-loaded image in BGR
38
+ Returns
39
+ embeddings (list): multi-dimensional vector
40
+ """
41
+ # return self.model.predict(img)[0].tolist()
42
+
43
+ # revert the image to original format and preprocess using the model
44
+ input_blob = (img[0] * 255).astype(np.uint8)
45
+
46
+ embeddings = self.model.model.feature(input_blob)
47
+
48
+ return embeddings[0].tolist()
49
+
50
+
51
+ def load_model(
52
+ url="https://github.com/opencv/opencv_zoo/raw/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx",
53
+ ) -> Any:
54
+ """
55
+ Construct SFace model, download its weights and load
56
+ """
57
+
58
+ home = folder_utils.get_deepface_home()
59
+
60
+ file_name = home + "/.deepface/weights/face_recognition_sface_2021dec.onnx"
61
+
62
+ if not os.path.isfile(file_name):
63
+
64
+ logger.info("sface weights will be downloaded...")
65
+
66
+ gdown.download(url, file_name, quiet=False)
67
+
68
+ model = SFaceWrapper(model_path=file_name)
69
+
70
+ return model
71
+
72
+
73
+ class SFaceWrapper:
74
+ def __init__(self, model_path):
75
+ """
76
+ SFace wrapper covering model construction, layer info and prediction
77
+ """
78
+ try:
79
+ self.model = cv.FaceRecognizerSF.create(
80
+ model=model_path, config="", backend_id=0, target_id=0
81
+ )
82
+ except Exception as err:
83
+ raise ValueError(
84
+ "Exception while calling opencv.FaceRecognizerSF module."
85
+ + "This is an optional dependency."
86
+ + "You can install it as pip install opencv-contrib-python."
87
+ ) from err
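
Editorial note: a minimal usage sketch for the client above. Note that forward() expects a batch of one float image scaled to [0, 1], as deepface's preprocessing produces, and converts it back to uint8 for OpenCV:

    import numpy as np

    client = SFaceClient()
    face = np.random.rand(1, 112, 112, 3)  # dummy pre-processed BGR crop
    embedding = client.forward(face)
    print(len(embedding))  # 128
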
basemodels/VGGFace.py ADDED
@@ -0,0 +1,160 @@
1
+ from typing import List
2
+ import os
3
+ import gdown
4
+ import numpy as np
5
+ from deepface.commons import package_utils, folder_utils
6
+ from deepface.modules import verification
7
+ from deepface.models.FacialRecognition import FacialRecognition
8
+ from deepface.commons import logger as log
9
+
10
+ logger = log.get_singletonish_logger()
11
+
12
+ # ---------------------------------------
13
+
14
+ tf_version = package_utils.get_tf_major_version()
15
+ if tf_version == 1:
16
+ from keras.models import Model, Sequential
17
+ from keras.layers import (
18
+ Convolution2D,
19
+ ZeroPadding2D,
20
+ MaxPooling2D,
21
+ Flatten,
22
+ Dropout,
23
+ Activation,
24
+ )
25
+ else:
26
+ from tensorflow.keras.models import Model, Sequential
27
+ from tensorflow.keras.layers import (
28
+ Convolution2D,
29
+ ZeroPadding2D,
30
+ MaxPooling2D,
31
+ Flatten,
32
+ Dropout,
33
+ Activation,
34
+ )
35
+
36
+ # ---------------------------------------
37
+
38
+ # pylint: disable=too-few-public-methods
39
+ class VggFaceClient(FacialRecognition):
40
+ """
41
+ VGG-Face model class
42
+ """
43
+
44
+ def __init__(self):
45
+ self.model = load_model()
46
+ self.model_name = "VGG-Face"
47
+ self.input_shape = (224, 224)
48
+ self.output_shape = 4096
49
+
50
+ def forward(self, img: np.ndarray) -> List[float]:
51
+ """
52
+ Generates embeddings using the VGG-Face model.
53
+ This method incorporates an additional normalization layer,
54
+ necessitating the override of the forward method.
55
+
56
+ Args:
57
+ img (np.ndarray): pre-loaded image in BGR
58
+ Returns
59
+ embeddings (list): multi-dimensional vector
60
+ """
61
+ # model.predict causes memory issue when it is called in a for loop
62
+ # embedding = model.predict(img, verbose=0)[0].tolist()
63
+
64
+ # having normalization layer in descriptor troubles for some gpu users (e.g. issue 957, 966)
65
+ # instead we are now calculating it with traditional way not with keras backend
66
+ embedding = self.model(img, training=False).numpy()[0].tolist()
67
+ embedding = verification.l2_normalize(embedding)
68
+ return embedding.tolist()
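
Editorial note: verification.l2_normalize scales a vector to unit Euclidean length, i.e. v / ||v||_2. A worked one-liner showing the same result:

    import numpy as np

    v = np.array([3.0, 4.0])
    print(v / np.linalg.norm(v))  # [0.6 0.8]
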
69
+
70
+
71
+ def base_model() -> Sequential:
72
+ """
73
+ Base model of VGG-Face, used for classification - not for finding embeddings
74
+ Returns:
75
+ model (Sequential): model was trained to classify 2622 identities
76
+ """
77
+ model = Sequential()
78
+ model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
79
+ model.add(Convolution2D(64, (3, 3), activation="relu"))
80
+ model.add(ZeroPadding2D((1, 1)))
81
+ model.add(Convolution2D(64, (3, 3), activation="relu"))
82
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
83
+
84
+ model.add(ZeroPadding2D((1, 1)))
85
+ model.add(Convolution2D(128, (3, 3), activation="relu"))
86
+ model.add(ZeroPadding2D((1, 1)))
87
+ model.add(Convolution2D(128, (3, 3), activation="relu"))
88
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
89
+
90
+ model.add(ZeroPadding2D((1, 1)))
91
+ model.add(Convolution2D(256, (3, 3), activation="relu"))
92
+ model.add(ZeroPadding2D((1, 1)))
93
+ model.add(Convolution2D(256, (3, 3), activation="relu"))
94
+ model.add(ZeroPadding2D((1, 1)))
95
+ model.add(Convolution2D(256, (3, 3), activation="relu"))
96
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
97
+
98
+ model.add(ZeroPadding2D((1, 1)))
99
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
100
+ model.add(ZeroPadding2D((1, 1)))
101
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
102
+ model.add(ZeroPadding2D((1, 1)))
103
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
104
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
105
+
106
+ model.add(ZeroPadding2D((1, 1)))
107
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
108
+ model.add(ZeroPadding2D((1, 1)))
109
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
110
+ model.add(ZeroPadding2D((1, 1)))
111
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
112
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
113
+
114
+ model.add(Convolution2D(4096, (7, 7), activation="relu"))
115
+ model.add(Dropout(0.5))
116
+ model.add(Convolution2D(4096, (1, 1), activation="relu"))
117
+ model.add(Dropout(0.5))
118
+ model.add(Convolution2D(2622, (1, 1)))
119
+ model.add(Flatten())
120
+ model.add(Activation("softmax"))
121
+
122
+ return model
123
+
124
+
125
+ def load_model(
126
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/vgg_face_weights.h5",
127
+ ) -> Model:
128
+ """
129
+ Final VGG-Face model being used for finding embeddings
130
+ Returns:
131
+ model (Model): returning 4096 dimensional vectors
132
+ """
133
+
134
+ model = base_model()
135
+
136
+ home = folder_utils.get_deepface_home()
137
+ output = home + "/.deepface/weights/vgg_face_weights.h5"
138
+
139
+ if not os.path.isfile(output):
140
+ logger.info("vgg_face_weights.h5 will be downloaded...")
141
+ gdown.download(url, output, quiet=False)
142
+
143
+ model.load_weights(output)
144
+
145
+ # 2622-dimensional model
146
+ # vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
147
+
148
+ # 4096-dimensional model offers a 6% to 14% accuracy improvement!
149
+ # - softmax causes underfitting
150
+ # - added normalization layer to avoid underfitting with euclidean
151
+ # as described here: https://github.com/serengil/deepface/issues/944
153
+ base_model_output = Flatten()(model.layers[-5].output)
154
+ # keras backend's l2 normalization layer troubles some gpu users (e.g. issue 957, 966)
155
+ # base_model_output = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(
156
+ # base_model_output
157
+ # )
158
+ vgg_face_descriptor = Model(inputs=model.input, outputs=base_model_output)
159
+
160
+ return vgg_face_descriptor
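
Editorial note: a minimal usage sketch; forward() returns the flattened 4096-d descriptor, already L2-normalized as discussed above:

    import numpy as np

    client = VggFaceClient()
    face = np.random.rand(1, 224, 224, 3).astype("float32")
    embedding = client.forward(face)
    print(len(embedding))  # 4096
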
basemodels/__init__.py ADDED
File without changes
basemodels/__pycache__/ArcFace.cpython-312.pyc ADDED
Binary file (6.18 kB)
basemodels/__pycache__/DeepID.cpython-312.pyc ADDED
Binary file (3.71 kB)
basemodels/__pycache__/Dlib.cpython-312.pyc ADDED
Binary file (4 kB)
basemodels/__pycache__/Facenet.cpython-312.pyc ADDED
Binary file (54.9 kB)
basemodels/__pycache__/FbDeepFace.cpython-312.pyc ADDED
Binary file (4.6 kB)
basemodels/__pycache__/GhostFaceNet.cpython-312.pyc ADDED
Binary file (11.3 kB)
basemodels/__pycache__/OpenFace.cpython-312.pyc ADDED
Binary file (17.3 kB)
basemodels/__pycache__/SFace.cpython-312.pyc ADDED
Binary file (3.63 kB)
basemodels/__pycache__/VGGFace.cpython-312.pyc ADDED
Binary file (7.01 kB)
basemodels/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (180 Bytes)
commons/__init__.py ADDED
File without changes
commons/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (177 Bytes)
commons/__pycache__/file_utils.cpython-312.pyc ADDED
Binary file (1.43 kB)
commons/__pycache__/folder_utils.cpython-312.pyc ADDED
Binary file (1.72 kB)
commons/__pycache__/image_utils.cpython-312.pyc ADDED
Binary file (6.39 kB)
commons/__pycache__/logger.cpython-312.pyc ADDED
Binary file (3.24 kB)
commons/__pycache__/os_path.cpython-312.pyc ADDED
Binary file (919 Bytes)