GoodWin committed
Commit 0f691e2 · 1 parent: 51841ef
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. FaceLandmarkDetection/Dockerfile +33 -0
  2. FaceLandmarkDetection/LICENSE +29 -0
  3. FaceLandmarkDetection/README.md +183 -0
  4. FaceLandmarkDetection/docs/images/2dlandmarks.png +0 -0
  5. FaceLandmarkDetection/docs/images/face-alignment-adrian.gif +0 -0
  6. FaceLandmarkDetection/face_alignment/__init__.py +7 -0
  7. FaceLandmarkDetection/face_alignment/__pycache__/__init__.cpython-36.pyc +0 -0
  8. FaceLandmarkDetection/face_alignment/__pycache__/__init__.cpython-37.pyc +0 -0
  9. FaceLandmarkDetection/face_alignment/__pycache__/api.cpython-36.pyc +0 -0
  10. FaceLandmarkDetection/face_alignment/__pycache__/api.cpython-37.pyc +0 -0
  11. FaceLandmarkDetection/face_alignment/__pycache__/models.cpython-36.pyc +0 -0
  12. FaceLandmarkDetection/face_alignment/__pycache__/models.cpython-37.pyc +0 -0
  13. FaceLandmarkDetection/face_alignment/__pycache__/utils.cpython-36.pyc +0 -0
  14. FaceLandmarkDetection/face_alignment/__pycache__/utils.cpython-37.pyc +0 -0
  15. FaceLandmarkDetection/face_alignment/api.py +207 -0
  16. FaceLandmarkDetection/face_alignment/detection/__init__.py +1 -0
  17. FaceLandmarkDetection/face_alignment/detection/__pycache__/__init__.cpython-36.pyc +0 -0
  18. FaceLandmarkDetection/face_alignment/detection/__pycache__/__init__.cpython-37.pyc +0 -0
  19. FaceLandmarkDetection/face_alignment/detection/__pycache__/core.cpython-36.pyc +0 -0
  20. FaceLandmarkDetection/face_alignment/detection/__pycache__/core.cpython-37.pyc +0 -0
  21. FaceLandmarkDetection/face_alignment/detection/core.py +131 -0
  22. FaceLandmarkDetection/face_alignment/detection/dlib/__init__.py +1 -0
  23. FaceLandmarkDetection/face_alignment/detection/dlib/dlib_detector.py +68 -0
  24. FaceLandmarkDetection/face_alignment/detection/folder/__init__.py +1 -0
  25. FaceLandmarkDetection/face_alignment/detection/folder/folder_detector.py +53 -0
  26. FaceLandmarkDetection/face_alignment/detection/sfd/__init__.py +1 -0
  27. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/__init__.cpython-36.pyc +0 -0
  28. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/__init__.cpython-37.pyc +0 -0
  29. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/bbox.cpython-36.pyc +0 -0
  30. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/bbox.cpython-37.pyc +0 -0
  31. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/detect.cpython-36.pyc +0 -0
  32. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/detect.cpython-37.pyc +0 -0
  33. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/net_s3fd.cpython-36.pyc +0 -0
  34. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/net_s3fd.cpython-37.pyc +0 -0
  35. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/sfd_detector.cpython-36.pyc +0 -0
  36. FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/sfd_detector.cpython-37.pyc +0 -0
  37. FaceLandmarkDetection/face_alignment/detection/sfd/bbox.py +109 -0
  38. FaceLandmarkDetection/face_alignment/detection/sfd/detect.py +75 -0
  39. FaceLandmarkDetection/face_alignment/detection/sfd/net_s3fd.py +129 -0
  40. FaceLandmarkDetection/face_alignment/detection/sfd/sfd_detector.py +51 -0
  41. FaceLandmarkDetection/face_alignment/models.py +261 -0
  42. FaceLandmarkDetection/face_alignment/utils.py +274 -0
  43. FaceLandmarkDetection/get_face_landmark.py +46 -0
  44. FaceLandmarkDetection/setup.cfg +32 -0
  45. FaceLandmarkDetection/setup.py +83 -0
  46. FaceLandmarkDetection/test/assets/aflw-test.jpg +0 -0
  47. FaceLandmarkDetection/test/facealignment_test.py +11 -0
  48. FaceLandmarkDetection/test/smoke_test.py +2 -0
  49. FaceLandmarkDetection/test/test_utils.py +36 -0
  50. FaceLandmarkDetection/tox.ini +3 -0
FaceLandmarkDetection/Dockerfile ADDED
@@ -0,0 +1,33 @@
+ # Based on https://github.com/pytorch/pytorch/blob/master/Dockerfile
+ FROM nvidia/cuda:9.2-cudnn7-devel-ubuntu16.04
+
+ RUN apt-get update && apt-get install -y --no-install-recommends \
+     build-essential \
+     cmake \
+     git \
+     curl \
+     vim \
+     ca-certificates \
+     libboost-all-dev \
+     python-qt4 \
+     libjpeg-dev \
+     libpng-dev && \
+     rm -rf /var/lib/apt/lists/*
+
+ RUN curl -o ~/miniconda.sh -O https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
+     chmod +x ~/miniconda.sh && \
+     ~/miniconda.sh -b -p /opt/conda && \
+     rm ~/miniconda.sh
+
+ ENV PATH /opt/conda/bin:$PATH
+
+ RUN conda config --set always_yes yes --set changeps1 no && conda update -q conda
+ RUN conda install pytorch torchvision cuda92 -c pytorch
+
+ # Install face-alignment package
+ WORKDIR /workspace
+ RUN chmod -R a+w /workspace
+ RUN git clone https://github.com/1adrianb/face-alignment
+ WORKDIR /workspace/face-alignment
+ RUN pip install -r requirements.txt
+ RUN python setup.py install
FaceLandmarkDetection/LICENSE ADDED
@@ -0,0 +1,29 @@
+ BSD 3-Clause License
+
+ Copyright (c) 2017, Adrian Bulat
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FaceLandmarkDetection/README.md ADDED
@@ -0,0 +1,183 @@
+ # Face Recognition
+
+ Detect facial landmarks from Python using the world's most accurate face alignment network, capable of detecting points in both 2D and 3D coordinates.
+
+ Built using [FAN](https://www.adrianbulat.com)'s state-of-the-art deep-learning-based face alignment method.
+
+ <p align="center"><img src="docs/images/face-alignment-adrian.gif" /></p>
+
+ **Note:** The lua version is available [here](https://github.com/1adrianb/2D-and-3D-face-alignment).
+
+ For numerical evaluations it is highly recommended to use the lua version, which uses identical models to the ones evaluated in the paper. More models will be added soon.
+
+ [![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) [![Build Status](https://travis-ci.com/1adrianb/face-alignment.svg?branch=master)](https://travis-ci.com/1adrianb/face-alignment) [![Anaconda-Server Badge](https://anaconda.org/1adrianb/face_alignment/badges/version.svg)](https://anaconda.org/1adrianb/face_alignment)
+ [![PyPI](https://img.shields.io/pypi/v/face-alignment.svg?style=flat-square)](https://pypi.org/project/face-alignment/)
+
+ ## Features
+
+ #### Detect 2D facial landmarks in pictures
+
+ <p align='center'>
+ <img src='docs/images/2dlandmarks.png' title='3D-FAN-Full example' style='max-width:600px'></img>
+ </p>
+
+ ```python
+ import face_alignment
+ from skimage import io
+
+ fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
+
+ input = io.imread('../test/assets/aflw-test.jpg')
+ preds = fa.get_landmarks(input)
+ ```
+
+ #### Detect 3D facial landmarks in pictures
+
+ <p align='center'>
+ <img src='https://www.adrianbulat.com/images/image-z-examples.png' title='3D-FAN-Full example' style='max-width:600px'></img>
+ </p>
+
+ ```python
+ import face_alignment
+ from skimage import io
+
+ fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, flip_input=False)
+
+ input = io.imread('../test/assets/aflw-test.jpg')
+ preds = fa.get_landmarks(input)
+ ```
+
+ #### Process an entire directory in one go
+
+ ```python
+ import face_alignment
+ from skimage import io
+
+ fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)
+
+ preds = fa.get_landmarks_from_directory('../test/assets/')
+ ```
+
+ #### Detect the landmarks using a specific face detector
+
+ By default the package will use the SFD face detector. However, users can alternatively use dlib or pre-existing ground truth bounding boxes.
+
+ ```python
+ import face_alignment
+
+ # sfd for SFD, dlib for Dlib and folder for existing bounding boxes.
+ fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, face_detector='sfd')
+ ```
+
+ #### Running on CPU/GPU
+ In order to specify the device (GPU or CPU) on which the code will run, one can explicitly pass the device flag:
+
+ ```python
+ import face_alignment
+
+ # 'cuda' for CUDA, 'cpu' for CPU
+ fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cpu')
+ ```
+
+ Please also see the ``examples`` folder.
+
+ ## Installation
+
+ ### Requirements
+
+ * Python 3.5+ or Python 2.7 (it may work with other versions too)
+ * Linux, Windows or macOS
+ * pytorch (>=1.0)
+
+ While not required, for optimal performance (especially for the detector) it is **highly** recommended to run the code using a CUDA-enabled GPU.
+
+ ### Binaries
+
+ The easiest way to install it is using either pip or conda:
+
+ | **Using pip**                | **Using conda**                             |
+ |------------------------------|---------------------------------------------|
+ | `pip install face-alignment` | `conda install -c 1adrianb face_alignment`  |
+
+ Alternatively, you can find instructions below for building it from source.
+
+ ### From source
+
+ Install pytorch and pytorch dependencies. Instructions taken from the [pytorch readme](https://github.com/pytorch/pytorch). For up-to-date instructions, check the framework's GitHub page.
+
+ On Linux
+ ```bash
+ export CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" # [anaconda root directory]
+
+ # Install basic dependencies
+ conda install numpy pyyaml mkl setuptools cmake gcc cffi
+
+ # Add LAPACK support for the GPU
+ conda install -c soumith magma-cuda80 # or magma-cuda75 if CUDA 7.5
+ ```
+
+ On OSX
+ ```bash
+ export CMAKE_PREFIX_PATH=[anaconda root directory]
+ conda install numpy pyyaml setuptools cmake cffi
+ ```
+ #### Get the PyTorch source
+ ```bash
+ git clone --recursive https://github.com/pytorch/pytorch
+ ```
+
+ #### Install PyTorch
+ On Linux
+ ```bash
+ python setup.py install
+ ```
+
+ On OSX
+ ```bash
+ MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py install
+ ```
+
+ #### Get the Face Alignment source code
+ ```bash
+ git clone https://github.com/1adrianb/face-alignment
+ ```
+ #### Install the Face Alignment lib
+ ```bash
+ pip install -r requirements.txt
+ python setup.py install
+ ```
+
+ ### Docker image
+
+ A Dockerfile is provided to build images with CUDA support and cudnn v7. For more instructions about running and building a docker image check the original Docker documentation.
+ ```
+ docker build -t face-alignment .
+ ```
+
+ ## How does it work?
+
+ While the work is presented here as a black box, if you want to know more about the internals of the method please check the original paper, either on arXiv or on my [webpage](https://www.adrianbulat.com).
+
+ ## Contributions
+
+ All contributions are welcome. If you encounter any issue (including examples of images where it fails) feel free to open an issue.
+
+ ## Citation
+
+ ```
+ @inproceedings{bulat2017far,
+   title={How far are we from solving the 2D \& 3D Face Alignment problem? (and a dataset of 230,000 3D facial landmarks)},
+   author={Bulat, Adrian and Tzimiropoulos, Georgios},
+   booktitle={International Conference on Computer Vision},
+   year={2017}
+ }
+ ```
+
+ For citing dlib, pytorch or any other packages used here, please check the original page of their respective authors.
+
+ ## Acknowledgements
+
+ * To the [pytorch](http://pytorch.org/) team for providing such an awesome deep learning framework
+ * To [my supervisor](http://www.cs.nott.ac.uk/~pszyt/) for his patience and suggestions.
+ * To all other python developers that made available the rest of the packages used in this repository.
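The README examples above stop at `preds`; for reference, `get_landmarks` returns a list with one `(68, 2)` NumPy array per detected face (or `(68, 3)` with `LandmarksType._3D`). A minimal plotting sketch, not part of this commit and assuming matplotlib is installed:

```python
# Sketch: overlay the 2D landmarks returned by get_landmarks on the image.
import face_alignment
import matplotlib.pyplot as plt
from skimage import io

fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cpu')

image = io.imread('../test/assets/aflw-test.jpg')
preds = fa.get_landmarks(image)  # list with one (68, 2) array per face

plt.imshow(image)
for face_landmarks in preds:
    # column 0 holds x coordinates, column 1 holds y coordinates
    plt.scatter(face_landmarks[:, 0], face_landmarks[:, 1], s=8)
plt.axis('off')
plt.show()
```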
FaceLandmarkDetection/docs/images/2dlandmarks.png ADDED
FaceLandmarkDetection/docs/images/face-alignment-adrian.gif ADDED
FaceLandmarkDetection/face_alignment/__init__.py ADDED
@@ -0,0 +1,7 @@
+ # -*- coding: utf-8 -*-
+
+ __author__ = """Adrian Bulat"""
+ __email__ = 'adrian.bulat@nottingham.ac.uk'
+ __version__ = '1.0.1'
+
+ from .api import FaceAlignment, LandmarksType, NetworkSize
FaceLandmarkDetection/face_alignment/__pycache__/__init__.cpython-36.pyc ADDED
Binary file (328 Bytes).
FaceLandmarkDetection/face_alignment/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (332 Bytes).
FaceLandmarkDetection/face_alignment/__pycache__/api.cpython-36.pyc ADDED
Binary file (6.92 kB).
FaceLandmarkDetection/face_alignment/__pycache__/api.cpython-37.pyc ADDED
Binary file (6.91 kB).
FaceLandmarkDetection/face_alignment/__pycache__/models.cpython-36.pyc ADDED
Binary file (7.13 kB).
FaceLandmarkDetection/face_alignment/__pycache__/models.cpython-37.pyc ADDED
Binary file (7.13 kB).
FaceLandmarkDetection/face_alignment/__pycache__/utils.cpython-36.pyc ADDED
Binary file (8.62 kB).
FaceLandmarkDetection/face_alignment/__pycache__/utils.cpython-37.pyc ADDED
Binary file (8.6 kB).
FaceLandmarkDetection/face_alignment/api.py ADDED
@@ -0,0 +1,207 @@
+ from __future__ import print_function
+ import os
+ import torch
+ from torch.utils.model_zoo import load_url
+ from enum import Enum
+ from skimage import io
+ from skimage import color
+ import numpy as np
+ import cv2
+ try:
+     import urllib.request as request_file
+ except BaseException:
+     import urllib as request_file
+
+ from .models import FAN, ResNetDepth
+ from .utils import *
+
+
+ class LandmarksType(Enum):
+     """Enum class defining the type of landmarks to detect.
+
+     ``_2D`` - the detected points ``(x,y)`` are detected in a 2D space and follow the visible contour of the face
+     ``_2halfD`` - these points represent the projection of the 3D points into 2D
+     ``_3D`` - detect the points ``(x,y,z)`` in a 3D space
+
+     """
+     _2D = 1
+     _2halfD = 2
+     _3D = 3
+
+
+ class NetworkSize(Enum):
+     # TINY = 1
+     # SMALL = 2
+     # MEDIUM = 3
+     LARGE = 4
+
+     def __new__(cls, value):
+         member = object.__new__(cls)
+         member._value_ = value
+         return member
+
+     def __int__(self):
+         return self.value
+
+ models_urls = {
+     '2DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/2DFAN4-11f355bf06.pth.tar',
+     '3DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/3DFAN4-7835d9f11d.pth.tar',
+     'depth': 'https://www.adrianbulat.com/downloads/python-fan/depth-2a464da4ea.pth.tar',
+ }
+
+
+ class FaceAlignment:
+     def __init__(self, landmarks_type, network_size=NetworkSize.LARGE,
+                  device='cuda', flip_input=False, face_detector='sfd', verbose=False):
+         self.device = device
+         self.flip_input = flip_input
+         self.landmarks_type = landmarks_type
+         self.verbose = verbose
+
+         network_size = int(network_size)
+
+         if 'cuda' in device:
+             torch.backends.cudnn.benchmark = True
+
+         # Get the face detector
+         face_detector_module = __import__('face_alignment.detection.' + face_detector,
+                                           globals(), locals(), [face_detector], 0)
+         self.face_detector = face_detector_module.FaceDetector(device=device, verbose=verbose)
+
+         # Initialise the face alignment network
+         self.face_alignment_net = FAN(network_size)
+         if landmarks_type == LandmarksType._2D:
+             network_name = '2DFAN-' + str(network_size)
+         else:
+             network_name = '3DFAN-' + str(network_size)
+
+         fan_weights = load_url(models_urls[network_name], map_location=lambda storage, loc: storage)
+         self.face_alignment_net.load_state_dict(fan_weights)
+
+         self.face_alignment_net.to(device)
+         self.face_alignment_net.eval()
+
+         # Initialise the depth prediction network
+         if landmarks_type == LandmarksType._3D:
+             self.depth_prediciton_net = ResNetDepth()
+
+             depth_weights = load_url(models_urls['depth'], map_location=lambda storage, loc: storage)
+             depth_dict = {
+                 k.replace('module.', ''): v for k,
+                 v in depth_weights['state_dict'].items()}
+             self.depth_prediciton_net.load_state_dict(depth_dict)
+
+             self.depth_prediciton_net.to(device)
+             self.depth_prediciton_net.eval()
+
+     def get_landmarks(self, image_or_path, detected_faces=None):
+         """Deprecated, please use get_landmarks_from_image
+
+         Arguments:
+             image_or_path {string or numpy.array or torch.tensor} -- The input image or path to it.
+
+         Keyword Arguments:
+             detected_faces {list of numpy.array} -- list of bounding boxes, one for each face found
+             in the image (default: {None})
+         """
+         return self.get_landmarks_from_image(image_or_path, detected_faces)
+
+     def get_landmarks_from_image(self, image_or_path, detected_faces=None):
+         """Predict the landmarks for each face present in the image.
+
+         This function predicts a set of 68 2D or 3D landmarks, one set for each face present.
+         If ``detected_faces`` is None the method will also run a face detector.
+
+         Arguments:
+             image_or_path {string or numpy.array or torch.tensor} -- The input image or path to it.
+
+         Keyword Arguments:
+             detected_faces {list of numpy.array} -- list of bounding boxes, one for each face found
+             in the image (default: {None})
+         """
+         if isinstance(image_or_path, str):
+             try:
+                 image = io.imread(image_or_path)
+             except IOError:
+                 print("error opening file :: ", image_or_path)
+                 return None
+         else:
+             image = image_or_path
+
+         if image.ndim == 2:
+             image = color.gray2rgb(image)
+         elif image.ndim == 4:
+             image = image[..., :3]
+
+         if detected_faces is None:
+             detected_faces = self.face_detector.detect_from_image(image[..., ::-1].copy())
+
+         if len(detected_faces) == 0:
+             print("Warning: No faces were detected.")
+             return None
+
+         torch.set_grad_enabled(False)
+         landmarks = []
+         for i, d in enumerate(detected_faces):
+             center = torch.FloatTensor(
+                 [d[2] - (d[2] - d[0]) / 2.0, d[3] - (d[3] - d[1]) / 2.0])
+             center[1] = center[1] - (d[3] - d[1]) * 0.12
+             scale = (d[2] - d[0] + d[3] - d[1]) / self.face_detector.reference_scale
+
+             inp = crop(image, center, scale)
+             inp = torch.from_numpy(inp.transpose(
+                 (2, 0, 1))).float()
+
+             inp = inp.to(self.device)
+             inp.div_(255.0).unsqueeze_(0)
+
+             out = self.face_alignment_net(inp)[-1].detach()
+             if self.flip_input:
+                 out += flip(self.face_alignment_net(flip(inp))
+                             [-1].detach(), is_label=True)
+             out = out.cpu()
+
+             pts, pts_img = get_preds_fromhm(out, center, scale)
+             pts, pts_img = pts.view(68, 2) * 4, pts_img.view(68, 2)
+
+             if self.landmarks_type == LandmarksType._3D:
+                 heatmaps = np.zeros((68, 256, 256), dtype=np.float32)
+                 for i in range(68):
+                     if pts[i, 0] > 0:
+                         heatmaps[i] = draw_gaussian(
+                             heatmaps[i], pts[i], 2)
+                 heatmaps = torch.from_numpy(
+                     heatmaps).unsqueeze_(0)
+
+                 heatmaps = heatmaps.to(self.device)
+                 depth_pred = self.depth_prediciton_net(
+                     torch.cat((inp, heatmaps), 1)).data.cpu().view(68, 1)
+                 pts_img = torch.cat(
+                     (pts_img, depth_pred * (1.0 / (256.0 / (200.0 * scale)))), 1)
+
+             landmarks.append(pts_img.numpy())
+
+         return landmarks
+
+     def get_landmarks_from_directory(self, path, extensions=['.jpg', '.png'], recursive=True, show_progress_bar=True):
+         detected_faces = self.face_detector.detect_from_directory(path, extensions, recursive, show_progress_bar)
+
+         predictions = {}
+         for image_path, bounding_boxes in detected_faces.items():
+             image = io.imread(image_path)
+             preds = self.get_landmarks_from_image(image, bounding_boxes)
+             predictions[image_path] = preds
+
+         return predictions
+
+     @staticmethod
+     def remove_models():
+         base_path = os.path.join(appdata_dir('face_alignment'), "data")
+         for data_model in os.listdir(base_path):
+             file_path = os.path.join(base_path, data_model)
+             try:
+                 if os.path.isfile(file_path):
+                     print('Removing ' + data_model + ' ...')
+                     os.unlink(file_path)
+             except Exception as e:
+                 print(e)
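As `get_landmarks_from_image` above shows, passing a `detected_faces` list of `(x1, y1, x2, y2)` boxes skips the face detector entirely; the crop center and scale are derived from each box. A short usage sketch (the box values are made up for illustration):

```python
# Sketch: reuse precomputed bounding boxes instead of running detection.
import face_alignment
from skimage import io

fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cpu')
image = io.imread('../test/assets/aflw-test.jpg')

boxes = [[61, 61, 196, 196]]  # hypothetical (x1, y1, x2, y2) box, one per face
preds = fa.get_landmarks_from_image(image, detected_faces=boxes)
print(preds[0].shape)  # (68, 2) for LandmarksType._2D
```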
FaceLandmarkDetection/face_alignment/detection/__init__.py ADDED
@@ -0,0 +1 @@
+ from .core import FaceDetector
FaceLandmarkDetection/face_alignment/detection/__pycache__/__init__.cpython-36.pyc ADDED
Binary file (186 Bytes).
FaceLandmarkDetection/face_alignment/detection/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (190 Bytes).
FaceLandmarkDetection/face_alignment/detection/__pycache__/core.cpython-36.pyc ADDED
Binary file (4.88 kB).
FaceLandmarkDetection/face_alignment/detection/__pycache__/core.cpython-37.pyc ADDED
Binary file (4.87 kB).
FaceLandmarkDetection/face_alignment/detection/core.py ADDED
@@ -0,0 +1,131 @@
+ import logging
+ import glob
+ from tqdm import tqdm
+ import numpy as np
+ import torch
+ import cv2
+ from skimage import io
+
+
+ class FaceDetector(object):
+     """An abstract class representing a face detector.
+
+     Any other face detection implementation must subclass it. All subclasses
+     must implement ``detect_from_image``, which returns a list of detected
+     bounding boxes. Optionally, for speed, detecting directly from a path is
+     recommended.
+     """
+
+     def __init__(self, device, verbose):
+         self.device = device
+         self.verbose = verbose
+
+         if verbose:
+             logger = logging.getLogger(__name__)
+             if 'cpu' in device:
+                 logger.warning("Detection running on CPU, this may be potentially slow.")
+
+         if 'cpu' not in device and 'cuda' not in device:
+             if verbose:
+                 logger.error("Expected values for device are: {cpu, cuda} but got: %s", device)
+             raise ValueError
+
+     def detect_from_image(self, tensor_or_path):
+         """Detects faces in a given image.
+
+         This function detects the faces present in a provided (usually BGR)
+         image. The input can be either the image itself or the path to it.
+
+         Arguments:
+             tensor_or_path {numpy.ndarray, torch.tensor or string} -- the path
+             to an image or the image itself.
+
+         Example::
+
+             >>> path_to_image = 'data/image_01.jpg'
+             ... detected_faces = detect_from_image(path_to_image)
+             [A list of bounding boxes (x1, y1, x2, y2)]
+             >>> image = cv2.imread(path_to_image)
+             ... detected_faces = detect_from_image(image)
+             [A list of bounding boxes (x1, y1, x2, y2)]
+
+         """
+         raise NotImplementedError
+
+     def detect_from_directory(self, path, extensions=['.jpg', '.png'], recursive=False, show_progress_bar=True):
+         """Detects faces from all the images present in a given directory.
+
+         Arguments:
+             path {string} -- a string containing a path that points to the folder containing the images
+
+         Keyword Arguments:
+             extensions {list} -- list of strings containing the extensions to be
+             considered, in the following format: ``.extension_name`` (default:
+             {['.jpg', '.png']})
+             recursive {bool} -- whether to scan the folder recursively (default: {False})
+             show_progress_bar {bool} -- display a progress bar (default: {True})
+
+         Example:
+             >>> directory = 'data'
+             ... detected_faces = detect_from_directory(directory)
+             {A dictionary of [lists containing bounding boxes(x1, y1, x2, y2)]}
+
+         """
+         if self.verbose:
+             logger = logging.getLogger(__name__)
+
+         if len(extensions) == 0:
+             if self.verbose:
+                 logger.error("Expected at least one extension, but none were received.")
+             raise ValueError
+
+         if self.verbose:
+             logger.info("Constructing the list of images.")
+         additional_pattern = '/**/*' if recursive else '/*'
+         files = []
+         for extension in extensions:
+             files.extend(glob.glob(path + additional_pattern + extension, recursive=recursive))
+
+         if self.verbose:
+             logger.info("Finished searching for images. %s images found", len(files))
+             logger.info("Preparing to run the detection.")
+
+         predictions = {}
+         for image_path in tqdm(files, disable=not show_progress_bar):
+             if self.verbose:
+                 logger.info("Running the face detector on image: %s", image_path)
+             predictions[image_path] = self.detect_from_image(image_path)
+
+         if self.verbose:
+             logger.info("The detector was successfully run on all %s images", len(files))
+
+         return predictions
+
+     @property
+     def reference_scale(self):
+         raise NotImplementedError
+
+     @property
+     def reference_x_shift(self):
+         raise NotImplementedError
+
+     @property
+     def reference_y_shift(self):
+         raise NotImplementedError
+
+     @staticmethod
+     def tensor_or_path_to_ndarray(tensor_or_path, rgb=True):
+         """Convert path (represented as a string) or torch.tensor to a numpy.ndarray
+
+         Arguments:
+             tensor_or_path {numpy.ndarray, torch.tensor or string} -- path to the image, or the image itself
+         """
+         if isinstance(tensor_or_path, str):
+             return cv2.imread(tensor_or_path) if not rgb else io.imread(tensor_or_path)
+         elif torch.is_tensor(tensor_or_path):
+             # Call cpu in case it's coming from cuda
+             return tensor_or_path.cpu().numpy()[..., ::-1].copy() if not rgb else tensor_or_path.cpu().numpy()
+         elif isinstance(tensor_or_path, np.ndarray):
+             return tensor_or_path[..., ::-1].copy() if not rgb else tensor_or_path
+         else:
+             raise TypeError
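Per the docstring above, a custom backend only has to implement `detect_from_image` and the three reference properties. A minimal illustrative subclass (the detection rule here is a made-up placeholder; `195` mirrors the value the bundled detectors return):

```python
# Sketch: the minimal FaceDetector contract, with a dummy detection rule.
from face_alignment.detection.core import FaceDetector


class CenterBoxDetector(FaceDetector):
    def detect_from_image(self, tensor_or_path):
        image = self.tensor_or_path_to_ndarray(tensor_or_path)
        h, w = image.shape[:2]
        # Placeholder: always claim one centered box covering half the image.
        return [[w * 0.25, h * 0.25, w * 0.75, h * 0.75]]

    @property
    def reference_scale(self):
        return 195  # same crop-scale reference as the bundled detectors

    @property
    def reference_x_shift(self):
        return 0

    @property
    def reference_y_shift(self):
        return 0
```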
FaceLandmarkDetection/face_alignment/detection/dlib/__init__.py ADDED
@@ -0,0 +1 @@
+ from .dlib_detector import DlibDetector as FaceDetector
FaceLandmarkDetection/face_alignment/detection/dlib/dlib_detector.py ADDED
@@ -0,0 +1,68 @@
+ import os
+ import cv2
+ import dlib
+
+ try:
+     import urllib.request as request_file
+ except BaseException:
+     import urllib as request_file
+
+ from ..core import FaceDetector
+ from ...utils import appdata_dir
+
+
+ class DlibDetector(FaceDetector):
+     def __init__(self, device, path_to_detector=None, verbose=False):
+         super().__init__(device, verbose)
+
+         print('Warning: this detector is deprecated. Please use a different one, e.g. S3FD.')
+         base_path = os.path.join(appdata_dir('face_alignment'), "data")
+
+         # Initialise the face detector
+         if 'cuda' in device:
+             if path_to_detector is None:
+                 path_to_detector = os.path.join(
+                     base_path, "mmod_human_face_detector.dat")
+
+                 if not os.path.isfile(path_to_detector):
+                     print("Downloading the face detection CNN. Please wait...")
+
+                     path_to_temp_detector = os.path.join(
+                         base_path, "mmod_human_face_detector.dat.download")
+
+                     if os.path.isfile(path_to_temp_detector):
+                         os.remove(path_to_temp_detector)
+
+                     request_file.urlretrieve(
+                         "https://www.adrianbulat.com/downloads/dlib/mmod_human_face_detector.dat",
+                         path_to_temp_detector)
+
+                     os.rename(path_to_temp_detector, path_to_detector)
+
+             self.face_detector = dlib.cnn_face_detection_model_v1(path_to_detector)
+         else:
+             self.face_detector = dlib.get_frontal_face_detector()
+
+     def detect_from_image(self, tensor_or_path):
+         image = self.tensor_or_path_to_ndarray(tensor_or_path, rgb=False)
+
+         detected_faces = self.face_detector(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
+
+         if 'cuda' not in self.device:
+             detected_faces = [[d.left(), d.top(), d.right(), d.bottom()] for d in detected_faces]
+         else:
+             detected_faces = [[d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom()] for d in detected_faces]
+
+         return detected_faces
+
+     @property
+     def reference_scale(self):
+         return 195
+
+     @property
+     def reference_x_shift(self):
+         return 0
+
+     @property
+     def reference_y_shift(self):
+         return 0
FaceLandmarkDetection/face_alignment/detection/folder/__init__.py ADDED
@@ -0,0 +1 @@
+ from .folder_detector import FolderDetector as FaceDetector
FaceLandmarkDetection/face_alignment/detection/folder/folder_detector.py ADDED
@@ -0,0 +1,53 @@
+ import os
+ import numpy as np
+ import torch
+
+ from ..core import FaceDetector
+
+
+ class FolderDetector(FaceDetector):
+     '''This is a simple helper module that assumes the faces were already detected
+     (either previously or provided as ground truth).
+
+     The class expects to find the bounding boxes in the same format used by
+     the rest of the face detectors, namely ``list[(x1,y1,x2,y2),...]``.
+     For each image the detector will search for a file with the same name and with one of the
+     following extensions: .npy, .t7 or .pth
+
+     '''
+
+     def __init__(self, device, path_to_detector=None, verbose=False):
+         super(FolderDetector, self).__init__(device, verbose)
+
+     def detect_from_image(self, tensor_or_path):
+         # Only strings supported
+         if not isinstance(tensor_or_path, str):
+             raise ValueError
+
+         base_name = os.path.splitext(tensor_or_path)[0]
+
+         if os.path.isfile(base_name + '.npy'):
+             detected_faces = np.load(base_name + '.npy')
+         elif os.path.isfile(base_name + '.t7'):
+             detected_faces = torch.load(base_name + '.t7')
+         elif os.path.isfile(base_name + '.pth'):
+             detected_faces = torch.load(base_name + '.pth')
+         else:
+             raise FileNotFoundError
+
+         if not isinstance(detected_faces, list):
+             raise TypeError
+
+         return detected_faces
+
+     @property
+     def reference_scale(self):
+         return 195
+
+     @property
+     def reference_x_shift(self):
+         return 0
+
+     @property
+     def reference_y_shift(self):
+         return 0
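One subtlety in `detect_from_image` above: the loaded object must be a `list`, and `np.load` round-trips a saved array as an `ndarray`, which fails the `isinstance` check; saving a plain list with `torch.save` to a `.pth` sidecar passes it. A usage sketch (paths and box values assumed):

```python
# Sketch: supply ground-truth boxes for the 'folder' detector backend.
import torch
import face_alignment

# For data/image_01.jpg the detector looks for data/image_01.{npy,t7,pth}.
boxes = [(61, 61, 196, 196)]             # list of (x1, y1, x2, y2) tuples
torch.save(boxes, 'data/image_01.pth')   # a list, so the isinstance check passes

fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,
                                  face_detector='folder', device='cpu')
preds = fa.get_landmarks_from_directory('data/')
```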
FaceLandmarkDetection/face_alignment/detection/sfd/__init__.py ADDED
@@ -0,0 +1 @@
+ from .sfd_detector import SFDDetector as FaceDetector
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/__init__.cpython-36.pyc ADDED
Binary file (211 Bytes).
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/__init__.cpython-37.pyc ADDED
Binary file (215 Bytes).
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/bbox.cpython-36.pyc ADDED
Binary file (3.79 kB).
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/bbox.cpython-37.pyc ADDED
Binary file (3.8 kB).
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/detect.cpython-36.pyc ADDED
Binary file (2.45 kB).
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/detect.cpython-37.pyc ADDED
Binary file (2.46 kB).
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/net_s3fd.cpython-36.pyc ADDED
Binary file (3.86 kB).
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/net_s3fd.cpython-37.pyc ADDED
Binary file (3.88 kB).
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/sfd_detector.cpython-36.pyc ADDED
Binary file (1.95 kB).
FaceLandmarkDetection/face_alignment/detection/sfd/__pycache__/sfd_detector.cpython-37.pyc ADDED
Binary file (1.95 kB).
FaceLandmarkDetection/face_alignment/detection/sfd/bbox.py ADDED
@@ -0,0 +1,109 @@
+ from __future__ import print_function
+ import os
+ import sys
+ import cv2
+ import random
+ import datetime
+ import time
+ import math
+ import argparse
+ import numpy as np
+ import torch
+
+ try:
+     from iou import IOU
+ except BaseException:
+     # IOU cython speedup 10x
+     def IOU(ax1, ay1, ax2, ay2, bx1, by1, bx2, by2):
+         sa = abs((ax2 - ax1) * (ay2 - ay1))
+         sb = abs((bx2 - bx1) * (by2 - by1))
+         x1, y1 = max(ax1, bx1), max(ay1, by1)
+         x2, y2 = min(ax2, bx2), min(ay2, by2)
+         w = x2 - x1
+         h = y2 - y1
+         if w < 0 or h < 0:
+             return 0.0
+         else:
+             return 1.0 * w * h / (sa + sb - w * h)
+
+
+ def bboxlog(x1, y1, x2, y2, axc, ayc, aww, ahh):
+     xc, yc, ww, hh = (x2 + x1) / 2, (y2 + y1) / 2, x2 - x1, y2 - y1
+     dx, dy = (xc - axc) / aww, (yc - ayc) / ahh
+     dw, dh = math.log(ww / aww), math.log(hh / ahh)
+     return dx, dy, dw, dh
+
+
+ def bboxloginv(dx, dy, dw, dh, axc, ayc, aww, ahh):
+     xc, yc = dx * aww + axc, dy * ahh + ayc
+     ww, hh = math.exp(dw) * aww, math.exp(dh) * ahh
+     x1, x2, y1, y2 = xc - ww / 2, xc + ww / 2, yc - hh / 2, yc + hh / 2
+     return x1, y1, x2, y2
+
+
+ def nms(dets, thresh):
+     if 0 == len(dets):
+         return []
+     x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
+     areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+     order = scores.argsort()[::-1]
+
+     keep = []
+     while order.size > 0:
+         i = order[0]
+         keep.append(i)
+         xx1, yy1 = np.maximum(x1[i], x1[order[1:]]), np.maximum(y1[i], y1[order[1:]])
+         xx2, yy2 = np.minimum(x2[i], x2[order[1:]]), np.minimum(y2[i], y2[order[1:]])
+
+         w, h = np.maximum(0.0, xx2 - xx1 + 1), np.maximum(0.0, yy2 - yy1 + 1)
+         ovr = w * h / (areas[i] + areas[order[1:]] - w * h)
+
+         inds = np.where(ovr <= thresh)[0]
+         order = order[inds + 1]
+
+     return keep
+
+
+ def encode(matched, priors, variances):
+     """Encode the variances from the priorbox layers into the ground truth boxes
+     we have matched (based on jaccard overlap) with the prior boxes.
+     Args:
+         matched: (tensor) Coords of ground truth for each prior in point-form
+             Shape: [num_priors, 4].
+         priors: (tensor) Prior boxes in center-offset form
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         encoded boxes (tensor), Shape: [num_priors, 4]
+     """
+
+     # dist b/t match center and prior's center
+     g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
+     # encode variance
+     g_cxcy /= (variances[0] * priors[:, 2:])
+     # match wh / prior wh
+     g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
+     g_wh = torch.log(g_wh) / variances[1]
+     # return target for smooth_l1_loss
+     return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]
+
+
+ def decode(loc, priors, variances):
+     """Decode locations from predictions using priors to undo
+     the encoding we did for offset regression at train time.
+     Args:
+         loc (tensor): location predictions for loc layers,
+             Shape: [num_priors,4]
+         priors (tensor): Prior boxes in center-offset form.
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         decoded bounding box predictions
+     """
+
+     boxes = torch.cat((
+         priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
+         priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
+     boxes[:, :2] -= boxes[:, 2:] / 2
+     boxes[:, 2:] += boxes[:, :2]
+     return boxes
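`encode` and `decode` above are exact inverses for the same priors and variances, which is easy to sanity-check with a round trip (the values below are arbitrary):

```python
# Sketch: decode(encode(box)) recovers the original corner-form box.
import torch
from face_alignment.detection.sfd.bbox import encode, decode

priors = torch.tensor([[100.0, 100.0, 64.0, 64.0]])  # (cx, cy, w, h)
variances = [0.1, 0.2]
box = torch.tensor([[80.0, 90.0, 130.0, 140.0]])     # (x1, y1, x2, y2)

offsets = encode(box, priors, variances)
recovered = decode(offsets, priors, variances)
print(recovered)  # tensor([[ 80.,  90., 130., 140.]])
```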
FaceLandmarkDetection/face_alignment/detection/sfd/detect.py ADDED
@@ -0,0 +1,75 @@
+ import torch
+ import torch.nn.functional as F
+
+ import os
+ import sys
+ import cv2
+ import random
+ import datetime
+ import math
+ import argparse
+ import numpy as np
+
+ import scipy.io as sio
+ import zipfile
+ from .net_s3fd import s3fd
+ from .bbox import *
+
+
+ def detect(net, img, device):
+     img = img - np.array([104, 117, 123])
+     img = img.transpose(2, 0, 1)
+     img = img.reshape((1,) + img.shape)
+
+     if 'cuda' in device:
+         torch.backends.cudnn.benchmark = True
+
+     img = torch.from_numpy(img).float().to(device)
+     BB, CC, HH, WW = img.size()
+     with torch.no_grad():
+         olist = net(img)
+
+     bboxlist = []
+     for i in range(len(olist) // 2):
+         olist[i * 2] = F.softmax(olist[i * 2], dim=1)
+     olist = [oelem.data.cpu() for oelem in olist]
+     for i in range(len(olist) // 2):
+         ocls, oreg = olist[i * 2], olist[i * 2 + 1]
+         FB, FC, FH, FW = ocls.size()  # feature map size
+         stride = 2**(i + 2)  # 4,8,16,32,64,128
+         anchor = stride * 4
+         poss = zip(*np.where(ocls[:, 1, :, :] > 0.05))
+         for Iindex, hindex, windex in poss:
+             axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
+             score = ocls[0, 1, hindex, windex]
+             loc = oreg[0, :, hindex, windex].contiguous().view(1, 4)
+             priors = torch.Tensor([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
+             variances = [0.1, 0.2]
+             box = decode(loc, priors, variances)
+             x1, y1, x2, y2 = box[0] * 1.0
+             # cv2.rectangle(imgshow,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,255),1)
+             bboxlist.append([x1, y1, x2, y2, score])
+     bboxlist = np.array(bboxlist)
+     if 0 == len(bboxlist):
+         bboxlist = np.zeros((1, 5))
+
+     return bboxlist
+
+
+ def flip_detect(net, img, device):
+     img = cv2.flip(img, 1)
+     b = detect(net, img, device)
+
+     bboxlist = np.zeros(b.shape)
+     bboxlist[:, 0] = img.shape[1] - b[:, 2]
+     bboxlist[:, 1] = b[:, 1]
+     bboxlist[:, 2] = img.shape[1] - b[:, 0]
+     bboxlist[:, 3] = b[:, 3]
+     bboxlist[:, 4] = b[:, 4]
+     return bboxlist
+
+
+ def pts_to_bb(pts):
+     min_x, min_y = np.min(pts, axis=0)
+     max_x, max_y = np.max(pts, axis=0)
+     return np.array([min_x, min_y, max_x, max_y])
FaceLandmarkDetection/face_alignment/detection/sfd/net_s3fd.py ADDED
@@ -0,0 +1,129 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ class L2Norm(nn.Module):
+     def __init__(self, n_channels, scale=1.0):
+         super(L2Norm, self).__init__()
+         self.n_channels = n_channels
+         self.scale = scale
+         self.eps = 1e-10
+         self.weight = nn.Parameter(torch.Tensor(self.n_channels))
+         self.weight.data *= 0.0
+         self.weight.data += self.scale
+
+     def forward(self, x):
+         norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
+         x = x / norm * self.weight.view(1, -1, 1, 1)
+         return x
+
+
+ class s3fd(nn.Module):
+     def __init__(self):
+         super(s3fd, self).__init__()
+         self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
+         self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
+
+         self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
+         self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
+
+         self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
+         self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
+         self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
+
+         self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
+         self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
+         self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
+
+         self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
+         self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
+         self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
+
+         self.fc6 = nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=3)
+         self.fc7 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1, padding=0)
+
+         self.conv6_1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
+         self.conv6_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)
+
+         self.conv7_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0)
+         self.conv7_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
+
+         self.conv3_3_norm = L2Norm(256, scale=10)
+         self.conv4_3_norm = L2Norm(512, scale=8)
+         self.conv5_3_norm = L2Norm(512, scale=5)
+
+         self.conv3_3_norm_mbox_conf = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
+         self.conv3_3_norm_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
+         self.conv4_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
+         self.conv4_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
+         self.conv5_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
+         self.conv5_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
+
+         self.fc7_mbox_conf = nn.Conv2d(1024, 2, kernel_size=3, stride=1, padding=1)
+         self.fc7_mbox_loc = nn.Conv2d(1024, 4, kernel_size=3, stride=1, padding=1)
+         self.conv6_2_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
+         self.conv6_2_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
+         self.conv7_2_mbox_conf = nn.Conv2d(256, 2, kernel_size=3, stride=1, padding=1)
+         self.conv7_2_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
+
+     def forward(self, x):
+         h = F.relu(self.conv1_1(x))
+         h = F.relu(self.conv1_2(h))
+         h = F.max_pool2d(h, 2, 2)
+
+         h = F.relu(self.conv2_1(h))
+         h = F.relu(self.conv2_2(h))
+         h = F.max_pool2d(h, 2, 2)
+
+         h = F.relu(self.conv3_1(h))
+         h = F.relu(self.conv3_2(h))
+         h = F.relu(self.conv3_3(h))
+         f3_3 = h
+         h = F.max_pool2d(h, 2, 2)
+
+         h = F.relu(self.conv4_1(h))
+         h = F.relu(self.conv4_2(h))
+         h = F.relu(self.conv4_3(h))
+         f4_3 = h
+         h = F.max_pool2d(h, 2, 2)
+
+         h = F.relu(self.conv5_1(h))
+         h = F.relu(self.conv5_2(h))
+         h = F.relu(self.conv5_3(h))
+         f5_3 = h
+         h = F.max_pool2d(h, 2, 2)
+
+         h = F.relu(self.fc6(h))
+         h = F.relu(self.fc7(h))
+         ffc7 = h
+         h = F.relu(self.conv6_1(h))
+         h = F.relu(self.conv6_2(h))
+         f6_2 = h
+         h = F.relu(self.conv7_1(h))
+         h = F.relu(self.conv7_2(h))
+         f7_2 = h
+
+         f3_3 = self.conv3_3_norm(f3_3)
+         f4_3 = self.conv4_3_norm(f4_3)
+         f5_3 = self.conv5_3_norm(f5_3)
+
+         cls1 = self.conv3_3_norm_mbox_conf(f3_3)
+         reg1 = self.conv3_3_norm_mbox_loc(f3_3)
+         cls2 = self.conv4_3_norm_mbox_conf(f4_3)
+         reg2 = self.conv4_3_norm_mbox_loc(f4_3)
+         cls3 = self.conv5_3_norm_mbox_conf(f5_3)
+         reg3 = self.conv5_3_norm_mbox_loc(f5_3)
+         cls4 = self.fc7_mbox_conf(ffc7)
+         reg4 = self.fc7_mbox_loc(ffc7)
+         cls5 = self.conv6_2_mbox_conf(f6_2)
+         reg5 = self.conv6_2_mbox_loc(f6_2)
+         cls6 = self.conv7_2_mbox_conf(f7_2)
+         reg6 = self.conv7_2_mbox_loc(f7_2)
+
+         # max-out background label
+         chunk = torch.chunk(cls1, 4, 1)
+         bmax = torch.max(torch.max(chunk[0], chunk[1]), chunk[2])
+         cls1 = torch.cat([bmax, chunk[3]], dim=1)
+
+         return [cls1, reg1, cls2, reg2, cls3, reg3, cls4, reg4, cls5, reg5, cls6, reg6]
FaceLandmarkDetection/face_alignment/detection/sfd/sfd_detector.py ADDED
@@ -0,0 +1,51 @@
+ import os
+ import cv2
+ from torch.utils.model_zoo import load_url
+
+ from ..core import FaceDetector
+
+ from .net_s3fd import s3fd
+ from .bbox import *
+ from .detect import *
+
+ models_urls = {
+     's3fd': 'https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth',
+ }
+
+
+ class SFDDetector(FaceDetector):
+     def __init__(self, device, path_to_detector=None, verbose=False):
+         super(SFDDetector, self).__init__(device, verbose)
+
+         # Initialise the face detector
+         if path_to_detector is None:
+             model_weights = load_url(models_urls['s3fd'])
+         else:
+             model_weights = torch.load(path_to_detector)
+
+         self.face_detector = s3fd()
+         self.face_detector.load_state_dict(model_weights)
+         self.face_detector.to(device)
+         self.face_detector.eval()
+
+     def detect_from_image(self, tensor_or_path):
+         image = self.tensor_or_path_to_ndarray(tensor_or_path)
+
+         bboxlist = detect(self.face_detector, image, device=self.device)
+         keep = nms(bboxlist, 0.3)
+         bboxlist = bboxlist[keep, :]
+         bboxlist = [x for x in bboxlist if x[-1] > 0.5]
+
+         return bboxlist
+
+     @property
+     def reference_scale(self):
+         return 195
+
+     @property
+     def reference_x_shift(self):
+         return 0
+
+     @property
+     def reference_y_shift(self):
+         return 0
FaceLandmarkDetection/face_alignment/models.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import math
5
+
6
+
7
+ def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):
8
+ "3x3 convolution with padding"
9
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3,
10
+ stride=strd, padding=padding, bias=bias)
11
+
12
+
13
+ class ConvBlock(nn.Module):
14
+ def __init__(self, in_planes, out_planes):
15
+ super(ConvBlock, self).__init__()
16
+ self.bn1 = nn.BatchNorm2d(in_planes)
17
+ self.conv1 = conv3x3(in_planes, int(out_planes / 2))
18
+ self.bn2 = nn.BatchNorm2d(int(out_planes / 2))
19
+ self.conv2 = conv3x3(int(out_planes / 2), int(out_planes / 4))
20
+ self.bn3 = nn.BatchNorm2d(int(out_planes / 4))
21
+ self.conv3 = conv3x3(int(out_planes / 4), int(out_planes / 4))
22
+
23
+ if in_planes != out_planes:
24
+ self.downsample = nn.Sequential(
25
+ nn.BatchNorm2d(in_planes),
26
+ nn.ReLU(True),
27
+ nn.Conv2d(in_planes, out_planes,
28
+ kernel_size=1, stride=1, bias=False),
29
+ )
30
+ else:
31
+ self.downsample = None
32
+
33
+ def forward(self, x):
34
+ residual = x
35
+
36
+ out1 = self.bn1(x)
37
+ out1 = F.relu(out1, True)
38
+ out1 = self.conv1(out1)
39
+
40
+ out2 = self.bn2(out1)
41
+ out2 = F.relu(out2, True)
42
+ out2 = self.conv2(out2)
43
+
44
+ out3 = self.bn3(out2)
45
+ out3 = F.relu(out3, True)
46
+ out3 = self.conv3(out3)
47
+
48
+ out3 = torch.cat((out1, out2, out3), 1)
49
+
50
+ if self.downsample is not None:
51
+ residual = self.downsample(residual)
52
+
53
+ out3 += residual
54
+
55
+ return out3
56
+
57
+
58
+ class Bottleneck(nn.Module):
59
+
60
+ expansion = 4
61
+
62
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
63
+ super(Bottleneck, self).__init__()
64
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
65
+ self.bn1 = nn.BatchNorm2d(planes)
66
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
67
+ padding=1, bias=False)
68
+ self.bn2 = nn.BatchNorm2d(planes)
69
+ self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
70
+ self.bn3 = nn.BatchNorm2d(planes * 4)
71
+ self.relu = nn.ReLU(inplace=True)
72
+ self.downsample = downsample
73
+ self.stride = stride
74
+
75
+ def forward(self, x):
76
+ residual = x
77
+
78
+ out = self.conv1(x)
79
+ out = self.bn1(out)
80
+ out = self.relu(out)
81
+
82
+ out = self.conv2(out)
83
+ out = self.bn2(out)
84
+ out = self.relu(out)
85
+
86
+ out = self.conv3(out)
87
+ out = self.bn3(out)
88
+
89
+ if self.downsample is not None:
90
+ residual = self.downsample(x)
91
+
92
+ out += residual
93
+ out = self.relu(out)
94
+
95
+ return out
96
+
97
+
98
+ class HourGlass(nn.Module):
99
+ def __init__(self, num_modules, depth, num_features):
100
+ super(HourGlass, self).__init__()
101
+ self.num_modules = num_modules
102
+ self.depth = depth
103
+ self.features = num_features
104
+
105
+ self._generate_network(self.depth)
106
+
107
+ def _generate_network(self, level):
108
+ self.add_module('b1_' + str(level), ConvBlock(self.features, self.features))
109
+
110
+ self.add_module('b2_' + str(level), ConvBlock(self.features, self.features))
111
+
112
+ if level > 1:
113
+ self._generate_network(level - 1)
114
+ else:
115
+ self.add_module('b2_plus_' + str(level), ConvBlock(self.features, self.features))
116
+
117
+ self.add_module('b3_' + str(level), ConvBlock(self.features, self.features))
118
+
119
+ def _forward(self, level, inp):
120
+ # Upper branch
121
+ up1 = inp
122
+ up1 = self._modules['b1_' + str(level)](up1)
123
+
124
+ # Lower branch
125
+ low1 = F.avg_pool2d(inp, 2, stride=2)
126
+ low1 = self._modules['b2_' + str(level)](low1)
127
+
128
+ if level > 1:
129
+ low2 = self._forward(level - 1, low1)
130
+ else:
131
+ low2 = low1
132
+ low2 = self._modules['b2_plus_' + str(level)](low2)
133
+
134
+ low3 = low2
135
+ low3 = self._modules['b3_' + str(level)](low3)
136
+
137
+ up2 = F.interpolate(low3, scale_factor=2, mode='nearest')
138
+
139
+ return up1 + up2
140
+
141
+ def forward(self, x):
142
+ return self._forward(self.depth, x)
143
+
144
+
145
+ class FAN(nn.Module):
146
+
147
+ def __init__(self, num_modules=1):
148
+ super(FAN, self).__init__()
149
+ self.num_modules = num_modules
150
+
151
+ # Base part
152
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
153
+ self.bn1 = nn.BatchNorm2d(64)
154
+ self.conv2 = ConvBlock(64, 128)
155
+ self.conv3 = ConvBlock(128, 128)
156
+ self.conv4 = ConvBlock(128, 256)
157
+
158
+ # Stacking part
159
+        for hg_module in range(self.num_modules):
+            self.add_module('m' + str(hg_module), HourGlass(1, 4, 256))
+            self.add_module('top_m_' + str(hg_module), ConvBlock(256, 256))
+            self.add_module('conv_last' + str(hg_module),
+                            nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
+            self.add_module('bn_end' + str(hg_module), nn.BatchNorm2d(256))
+            self.add_module('l' + str(hg_module),
+                            nn.Conv2d(256, 68, kernel_size=1, stride=1, padding=0))
+
+            if hg_module < self.num_modules - 1:
+                self.add_module(
+                    'bl' + str(hg_module), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
+                self.add_module('al' + str(hg_module),
+                                nn.Conv2d(68, 256, kernel_size=1, stride=1, padding=0))
+
+    def forward(self, x):
+        x = F.relu(self.bn1(self.conv1(x)), True)
+        x = F.avg_pool2d(self.conv2(x), 2, stride=2)
+        x = self.conv3(x)
+        x = self.conv4(x)
+
+        previous = x
+
+        outputs = []
+        for i in range(self.num_modules):
+            hg = self._modules['m' + str(i)](previous)
+
+            ll = hg
+            ll = self._modules['top_m_' + str(i)](ll)
+
+            ll = F.relu(self._modules['bn_end' + str(i)]
+                        (self._modules['conv_last' + str(i)](ll)), True)
+
+            # Predict heatmaps
+            tmp_out = self._modules['l' + str(i)](ll)
+            outputs.append(tmp_out)
+
+            if i < self.num_modules - 1:
+                ll = self._modules['bl' + str(i)](ll)
+                tmp_out_ = self._modules['al' + str(i)](tmp_out)
+                previous = previous + ll + tmp_out_
+
+        return outputs
+
+
+class ResNetDepth(nn.Module):
+
+    def __init__(self, block=Bottleneck, layers=[3, 8, 36, 3], num_classes=68):
+        self.inplanes = 64
+        super(ResNetDepth, self).__init__()
+        self.conv1 = nn.Conv2d(3 + 68, 64, kernel_size=7, stride=2, padding=3,
+                               bias=False)
+        self.bn1 = nn.BatchNorm2d(64)
+        self.relu = nn.ReLU(inplace=True)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.layer1 = self._make_layer(block, 64, layers[0])
+        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
+        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
+        self.avgpool = nn.AvgPool2d(7)
+        self.fc = nn.Linear(512 * block.expansion, num_classes)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+                m.weight.data.normal_(0, math.sqrt(2. / n))
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+
+    def _make_layer(self, block, planes, blocks, stride=1):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                nn.Conv2d(self.inplanes, planes * block.expansion,
+                          kernel_size=1, stride=stride, bias=False),
+                nn.BatchNorm2d(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample))
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        x = self.maxpool(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        x = self.avgpool(x)
+        x = x.view(x.size(0), -1)
+        x = self.fc(x)
+
+        return x
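The forward pass above yields one 68-channel heatmap tensor per hourglass module, and callers normally keep only the last one. A minimal sketch of how the two networks in this file fit together; the `FAN(4)` call, the random inputs, and the 256x256 sizes are illustrative assumptions, not the package's documented API:

    import torch

    fan = FAN(4)                          # hypothetical: 4 stacked hourglass modules
    img = torch.rand(1, 3, 256, 256)      # stand-in for a normalized RGB face crop
    heatmaps = fan(img)                   # list of [1, 68, 64, 64] tensors, one per module
    last = heatmaps[-1]                   # the prediction used for decoding

    # ResNetDepth regresses one z-value per landmark from the image plus its
    # 68 heatmaps (hence the 3 + 68 input channels in conv1); the upsampled
    # heatmaps are faked here with random values.
    depth_net = ResNetDepth()
    inp = torch.cat((img, torch.rand(1, 68, 256, 256)), 1)
    depth = depth_net(inp)                # [1, 68]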
FaceLandmarkDetection/face_alignment/utils.py ADDED
@@ -0,0 +1,274 @@
+from __future__ import print_function
+import os
+import sys
+import time
+import torch
+import math
+import numpy as np
+import cv2
+
+
+def _gaussian(
+        size=3, sigma=0.25, amplitude=1, normalize=False, width=None,
+        height=None, sigma_horz=None, sigma_vert=None, mean_horz=0.5,
+        mean_vert=0.5):
+    # Handle some defaults
+    if width is None:
+        width = size
+    if height is None:
+        height = size
+    if sigma_horz is None:
+        sigma_horz = sigma
+    if sigma_vert is None:
+        sigma_vert = sigma
+    center_x = mean_horz * width + 0.5
+    center_y = mean_vert * height + 0.5
+    gauss = np.empty((height, width), dtype=np.float32)
+    # Generate the kernel
+    for i in range(height):
+        for j in range(width):
+            gauss[i][j] = amplitude * math.exp(-(math.pow((j + 1 - center_x) / (
+                sigma_horz * width), 2) / 2.0 + math.pow((i + 1 - center_y) / (sigma_vert * height), 2) / 2.0))
+    if normalize:
+        gauss = gauss / np.sum(gauss)
+    return gauss
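A quick sanity check of the kernel above: with the defaults the peak sits exactly at the center and the amplitude is 1:

    g = _gaussian(size=7)        # 7x7 float32 kernel
    print(g.shape)               # (7, 7)
    print(g[3, 3], g[0, 0])      # 1.0 at the center, ~0.05 in the corner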
+
+
+def draw_gaussian(image, point, sigma):
+    # Bail out early if the gaussian does not overlap the image at all
+    ul = [math.floor(point[0] - 3 * sigma), math.floor(point[1] - 3 * sigma)]
+    br = [math.floor(point[0] + 3 * sigma), math.floor(point[1] + 3 * sigma)]
+    if (ul[0] > image.shape[1] or ul[1] > image.shape[0] or br[0] < 1 or br[1] < 1):
+        return image
+    size = 6 * sigma + 1
+    g = _gaussian(size)
+    g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) - int(max(1, ul[0])) + int(max(1, -ul[0]))]
+    g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) - int(max(1, ul[1])) + int(max(1, -ul[1]))]
+    img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))]
+    img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))]
+    assert (g_x[0] > 0 and g_y[1] > 0)
+    image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]
+          ] = image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] + g[g_y[0] - 1:g_y[1], g_x[0] - 1:g_x[1]]
+    image[image > 1] = 1
+    return image
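When building training heatmaps, `draw_gaussian` is called once per landmark; note that it mutates and returns the same array and expects `point` as (x, y). A small self-contained call:

    hm = np.zeros((64, 64), dtype=np.float32)
    hm = draw_gaussian(hm, (32, 32), sigma=2)   # stamps a 13x13 blob around x=32, y=32
    print(hm.max())                             # 1.0 (values are clamped to 1)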
+
+
+def transform(point, center, scale, resolution, invert=False):
+    """Generate an affine transformation matrix and apply it to a point.
+
+    Given a point, a center, a scale and a target resolution, the function
+    builds the corresponding affine transformation matrix and maps the point
+    through it. If invert is ``True`` it applies the inverse transformation.
+
+    Arguments:
+        point {torch.tensor} -- the input 2D point
+        center {torch.tensor or numpy.array} -- the center around which to perform the transformations
+        scale {float} -- the scale of the face/object
+        resolution {float} -- the output resolution
+
+    Keyword Arguments:
+        invert {bool} -- whether to apply the direct or the inverse
+        transformation matrix (default: {False})
+    """
+    _pt = torch.ones(3)
+    _pt[0] = point[0]
+    _pt[1] = point[1]
+
+    h = 200.0 * scale
+    t = torch.eye(3)
+    t[0, 0] = resolution / h
+    t[1, 1] = resolution / h
+    t[0, 2] = resolution * (-center[0] / h + 0.5)
+    t[1, 2] = resolution * (-center[1] / h + 0.5)
+
+    if invert:
+        t = torch.inverse(t)
+
+    new_point = (torch.matmul(t, _pt))[0:2]
+
+    return new_point.int()
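A small illustration of the mapping (coordinates are truncated to integers by `new_point.int()`): with `scale = 2.0` the reference box is 200 * 2 = 400 px wide, so the box center lands in the middle of a 256 px crop, and `invert=True` maps it back:

    import numpy as np
    center = np.array([320.0, 240.0])
    p_crop = transform([320, 240], center, 2.0, 256)         # ~ (128, 128)
    p_img = transform([128, 128], center, 2.0, 256, True)    # ~ (320, 240)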
+
+
+def crop(image, center, scale, resolution=256.0):
+    """Center crop an image or set of heatmaps around a point.
+
+    Crops the image around the center. Input is expected to be an np.ndarray.
+
+    Arguments:
+        image {numpy.array} -- an RGB image
+        center {numpy.array} -- the center of the object, usually the same as of the bounding box
+        scale {float} -- scale of the face
+
+    Keyword Arguments:
+        resolution {float} -- the size of the output cropped image (default: {256.0})
+
+    Returns:
+        numpy.array -- the cropped image, resized to resolution x resolution
+    """
+    ul = transform([1, 1], center, scale, resolution, True)
+    br = transform([resolution, resolution], center, scale, resolution, True)
+    # pad = math.ceil(torch.norm((ul - br).float()) / 2.0 - (br[0] - ul[0]) / 2.0)
+    if image.ndim > 2:
+        newDim = np.array([br[1] - ul[1], br[0] - ul[0],
+                           image.shape[2]], dtype=np.int32)
+        newImg = np.zeros(newDim, dtype=np.uint8)
+    else:
+        newDim = np.array([br[1] - ul[1], br[0] - ul[0]], dtype=np.int32)
+        newImg = np.zeros(newDim, dtype=np.uint8)
+    ht = image.shape[0]
+    wd = image.shape[1]
+    newX = np.array(
+        [max(1, -ul[0] + 1), min(br[0], wd) - ul[0]], dtype=np.int32)
+    newY = np.array(
+        [max(1, -ul[1] + 1), min(br[1], ht) - ul[1]], dtype=np.int32)
+    oldX = np.array([max(1, ul[0] + 1), min(br[0], wd)], dtype=np.int32)
+    oldY = np.array([max(1, ul[1] + 1), min(br[1], ht)], dtype=np.int32)
+    newImg[newY[0] - 1:newY[1], newX[0] - 1:newX[1]
+           ] = image[oldY[0] - 1:oldY[1], oldX[0] - 1:oldX[1], :]
+    newImg = cv2.resize(newImg, dsize=(int(resolution), int(resolution)),
+                        interpolation=cv2.INTER_LINEAR)
+    return newImg
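`crop` is typically driven by a detector box. A sketch of deriving `center` and `scale` from a hypothetical (x1, y1, x2, y2) box; the /195.0 heuristic mirrors the reference-box convention used by this package's detection API and is stated here as an assumption:

    image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # stand-in frame
    d = [100, 120, 300, 340]                                          # hypothetical face box
    center = np.array([(d[0] + d[2]) / 2.0, (d[1] + d[3]) / 2.0])
    scale = (d[2] - d[0] + d[3] - d[1]) / 195.0                       # assumption: API heuristic
    face = crop(image, center, scale, resolution=256.0)               # (256, 256, 3) uint8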
+
+
+def get_preds_fromhm(hm, center=None, scale=None):
+    """Obtain (x,y) coordinates given a set of N heatmaps. If the center
+    and the scale are provided, the function also returns the points in
+    the original coordinate frame.
+
+    Arguments:
+        hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]
+
+    Keyword Arguments:
+        center {torch.tensor} -- the center of the bounding box (default: {None})
+        scale {float} -- face scale (default: {None})
+    """
+    max_vals, idx = torch.max(
+        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
+    idx += 1
+    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
+    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
+    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)
+
+    # Quarter-pixel refinement: nudge each peak towards its larger neighbour
+    for i in range(preds.size(0)):
+        for j in range(preds.size(1)):
+            hm_ = hm[i, j, :]
+            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
+            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
+                diff = torch.FloatTensor(
+                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
+                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
+                preds[i, j].add_(diff.sign_().mul_(.25))
+
+    preds.add_(-.5)
+
+    preds_orig = torch.zeros(preds.size())
+    if center is not None and scale is not None:
+        for i in range(hm.size(0)):
+            for j in range(hm.size(1)):
+                preds_orig[i, j] = transform(
+                    preds[i, j], center, scale, hm.size(2), True)
+
+    return preds, preds_orig
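Decoding in isolation, with a synthetic peak; real callers pass the network's last heatmap plus the same `center`/`scale` used for cropping, so `preds_orig` comes back in image coordinates:

    hm = torch.zeros(1, 68, 64, 64)
    hm[0, 0, 30, 20] = 1.0      # a fake peak for landmark 0 at x=20, y=30 (0-based)
    preds, preds_orig = get_preds_fromhm(hm)
    print(preds[0, 0])          # tensor([20.5, 30.5]): 1-based argmax minus the final 0.5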
+
+
+def shuffle_lr(parts, pairs=None):
+    """Shuffle the points left-right according to the axis of symmetry
+    of the object.
+
+    Arguments:
+        parts {torch.tensor} -- a 3D or 4D object containing the
+        heatmaps.
+
+    Keyword Arguments:
+        pairs {list of integers} -- [order of the flipped points] (default: {None})
+    """
+    if pairs is None:
+        pairs = [16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
+                 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 27, 28, 29, 30, 35,
+                 34, 33, 32, 31, 45, 44, 43, 42, 47, 46, 39, 38, 37, 36, 41,
+                 40, 54, 53, 52, 51, 50, 49, 48, 59, 58, 57, 56, 55, 64, 63,
+                 62, 61, 60, 67, 66, 65]
+    if parts.ndimension() == 3:
+        parts = parts[pairs, ...]
+    else:
+        parts = parts[:, pairs, ...]
+
+    return parts
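The default `pairs` permutation over the 68-point annotation is its own inverse (every left point swaps with its right counterpart), which is what makes the double-flip round trip in the tests further below return the original tensor:

    idx = torch.arange(68, dtype=torch.float32).view(68, 1, 1).expand(68, 4, 4)
    assert torch.equal(shuffle_lr(shuffle_lr(idx)), idx)   # applying it twice is the identity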
+
+
+def flip(tensor, is_label=False):
+    """Flip an image or a set of heatmaps left-right.
+
+    Arguments:
+        tensor {numpy.array or torch.tensor} -- [the input image or heatmaps]
+
+    Keyword Arguments:
+        is_label {bool} -- [whether the input is a set of heatmaps rather than an image] (default: {False})
+    """
+    if not torch.is_tensor(tensor):
+        tensor = torch.from_numpy(tensor)
+
+    if is_label:
+        tensor = shuffle_lr(tensor).flip(tensor.ndimension() - 1)
+    else:
+        tensor = tensor.flip(tensor.ndimension() - 1)
+
+    return tensor
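With `is_label=True` the heatmap channels are re-ordered via `shuffle_lr` before mirroring, so a flipped-input prediction can be averaged with the direct one. A sketch of the usual flip-input test-time augmentation, assuming `fan` and `img` as in the models example above:

    out = fan(img)[-1]
    out_flip = flip(fan(flip(img))[-1], is_label=True)
    out_avg = (out + out_flip) / 2.0   # symmetrized heatmaps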
+
+
+# From pyzolib/paths.py (https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py)
+def appdata_dir(appname=None, roaming=False):
+    """ appdata_dir(appname=None, roaming=False)
+
+    Get the path to the application directory, where applications are allowed
+    to write user-specific files (e.g. configurations). For non-user-specific
+    data, consider using common_appdata_dir().
+    If appname is given, a subdir is appended (and created if necessary).
+    If roaming is True, will prefer a roaming directory (Windows Vista/7).
+    """
+
+    # Define default user directory
+    userDir = os.getenv('FACEALIGNMENT_USERDIR', None)
+    if userDir is None:
+        userDir = os.path.expanduser('~')
+        if not os.path.isdir(userDir):  # pragma: no cover
+            userDir = '/var/tmp'  # issue #54
+
+    # Get system app data dir
+    path = None
+    if sys.platform.startswith('win'):
+        path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
+        path = (path2 or path1) if roaming else (path1 or path2)
+    elif sys.platform.startswith('darwin'):
+        path = os.path.join(userDir, 'Library', 'Application Support')
+    # On Linux and as fallback
+    if not (path and os.path.isdir(path)):
+        path = userDir
+
+    # Maybe we should store things local to the executable (in case of a
+    # portable distro or a frozen application that wants to be portable)
+    prefix = sys.prefix
+    if getattr(sys, 'frozen', None):
+        prefix = os.path.abspath(os.path.dirname(sys.executable))
+    for reldir in ('settings', '../settings'):
+        localpath = os.path.abspath(os.path.join(prefix, reldir))
+        if os.path.isdir(localpath):  # pragma: no cover
+            try:
+                open(os.path.join(localpath, 'test.write'), 'wb').close()
+                os.remove(os.path.join(localpath, 'test.write'))
+            except IOError:
+                pass  # We cannot write in this directory
+            else:
+                path = localpath
+                break
+
+    # Get path specific for this app
+    if appname:
+        if path == userDir:
+            appname = '.' + appname.lstrip('.')  # Make it a hidden directory
+        path = os.path.join(path, appname)
+        if not os.path.isdir(path):  # pragma: no cover
+            os.mkdir(path)
+
+    # Done
+    return path
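This is the helper used elsewhere in this package to choose where downloaded model weights are cached; for example:

    weights_dir = appdata_dir('face_alignment')
    # typically ~/.face_alignment on Linux, %LOCALAPPDATA%\face_alignment on
    # Windows, ~/Library/Application Support/face_alignment on macOS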
FaceLandmarkDetection/get_face_landmark.py ADDED
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# encoding: utf-8
+import torch
+import pickle
+import numpy as np
+import matplotlib.pyplot as plt
+from PIL import Image
+
+import cv2
+import os
+import face_alignment
+from skimage import io, transform
+
+fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cuda:0', flip_input=False)
+
+
+Nums = 0
+FilePath = '../TestData/RealVgg/Imgs'
+SavePath = '../TestData/RealVgg/Imgs/Landmarks'
+if not os.path.exists(SavePath):
+    os.mkdir(SavePath)
+
+ImgNames = os.listdir(FilePath)
+ImgNames.sort()
+
+for i, name in enumerate(ImgNames):
+    print((i, name))
+
+    imgs = io.imread(os.path.join(FilePath, name))
+
+    imgO = imgs
+    try:
+        PredsAll = fa.get_landmarks(imgO)
+    except Exception:
+        print('#########No face')
+        continue
+    if PredsAll is None:
+        print('#########No face2')
+        continue
+    if len(PredsAll) != 1:
+        print('#########Too many faces')
+        continue
+    preds = PredsAll[-1]
+    # Distance between landmarks 27 and 33, computed but currently unused
+    AddLength = np.sqrt(np.sum(np.power(preds[27][0:2] - preds[33][0:2], 2)))
+    SaveName = name + '.txt'
+
+    np.savetxt(os.path.join(SavePath, SaveName), preds[:, 0:2], fmt='%.3f')
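Each saved file contains 68 rows of `x y` values (one per landmark, `%.3f` formatted), so downstream code can reload a file with:

    pts = np.loadtxt(os.path.join(SavePath, SaveName))   # numpy array of shape (68, 2)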
FaceLandmarkDetection/setup.cfg ADDED
@@ -0,0 +1,32 @@
+[bumpversion]
+current_version = 1.0.1
+commit = True
+tag = True
+
+[bumpversion:file:setup.py]
+search = version='{current_version}'
+replace = version='{new_version}'
+
+[bumpversion:file:face_alignment/__init__.py]
+search = __version__ = '{current_version}'
+replace = __version__ = '{new_version}'
+
+[metadata]
+description-file = README.md
+
+[bdist_wheel]
+universal = 1
+
+[flake8]
+exclude =
+    .github,
+    examples,
+    docs,
+    .tox,
+    bin,
+    dist,
+    tools,
+    *.egg-info,
+    __init__.py,
+    *.yml
+max-line-length = 160
FaceLandmarkDetection/setup.py ADDED
@@ -0,0 +1,83 @@
+import io
+import os
+from os import path
+import re
+from setuptools import setup, find_packages
+# To use consistent encodings
+from codecs import open
+
+# Function from: https://github.com/pytorch/vision/blob/master/setup.py
+
+
+def read(*names, **kwargs):
+    with io.open(
+        os.path.join(os.path.dirname(__file__), *names),
+        encoding=kwargs.get("encoding", "utf8")
+    ) as fp:
+        return fp.read()
+
+# Function from: https://github.com/pytorch/vision/blob/master/setup.py
+
+
+def find_version(*file_paths):
+    version_file = read(*file_paths)
+    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
+                              version_file, re.M)
+    if version_match:
+        return version_match.group(1)
+    raise RuntimeError("Unable to find version string.")
+
+
+here = path.abspath(path.dirname(__file__))
+
+# Get the long description from the README file
+with open(path.join(here, 'README.md'), encoding='utf-8') as readme_file:
+    long_description = readme_file.read()
+
+VERSION = find_version('face_alignment', '__init__.py')
+
+requirements = [
+    'torch',
+    'numpy',
+    'scipy>=0.17',
+    'scikit-image',
+    'opencv-python',
+    'tqdm',
+    'enum34;python_version<"3.4"'
+]
+
+setup(
+    name='face_alignment',
+    version=VERSION,
+
+    description="Detect 2D or 3D face landmarks from Python",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+
+    # Author details
+    author="Adrian Bulat",
+    author_email="adrian.bulat@nottingham.ac.uk",
+    url="https://github.com/1adrianb/face-alignment",
+
+    # Package info
+    packages=find_packages(exclude=('test',)),
+
+    install_requires=requirements,
+    license='BSD',
+    zip_safe=True,
+
+    classifiers=[
+        'Development Status :: 5 - Production/Stable',
+        'Operating System :: OS Independent',
+        'License :: OSI Approved :: BSD License',
+        'Natural Language :: English',
+
+        # Supported python versions
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 3.4',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+    ],
+)
FaceLandmarkDetection/test/assets/aflw-test.jpg ADDED
FaceLandmarkDetection/test/facealignment_test.py ADDED
@@ -0,0 +1,11 @@
+import unittest
+import face_alignment
+
+
+class Tester(unittest.TestCase):
+    def test_predict_points(self):
+        fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, device='cpu')
+        fa.get_landmarks('test/assets/aflw-test.jpg')
+
+
+if __name__ == '__main__':
+    unittest.main()
FaceLandmarkDetection/test/smoke_test.py ADDED
@@ -0,0 +1,2 @@
+import torch
+import face_alignment
FaceLandmarkDetection/test/test_utils.py ADDED
@@ -0,0 +1,36 @@
+import unittest
+from face_alignment.utils import *
+import numpy as np
+import torch
+
+
+class Tester(unittest.TestCase):
+    def test_flip_is_label(self):
+        # Generate the points
+        heatmaps = torch.from_numpy(np.random.randint(1, high=250, size=(68, 64, 64)).astype('float32'))
+
+        flipped_heatmaps = flip(flip(heatmaps.clone(), is_label=True), is_label=True)
+
+        assert np.allclose(heatmaps.numpy(), flipped_heatmaps.numpy())
+
+    def test_flip_is_image(self):
+        fake_image = torch.rand(3, 256, 256)
+        flipped_fake_image = flip(flip(fake_image.clone()))
+
+        assert np.allclose(fake_image.numpy(), flipped_fake_image.numpy())
+
+    def test_getpreds(self):
+        pts = torch.from_numpy(np.random.randint(1, high=63, size=(68, 2)).astype('float32'))
+
+        heatmaps = np.zeros((68, 256, 256))
+        for i in range(68):
+            if pts[i, 0] > 0:
+                heatmaps[i] = draw_gaussian(heatmaps[i], pts[i], 2)
+        heatmaps = torch.from_numpy(np.expand_dims(heatmaps, axis=0))
+
+        preds, _ = get_preds_fromhm(heatmaps)
+
+        assert np.allclose(pts.numpy(), preds.numpy(), atol=5)
+
+
+if __name__ == '__main__':
+    unittest.main()
FaceLandmarkDetection/tox.ini ADDED
@@ -0,0 +1,3 @@
+[flake8]
+max-line-length = 120
+ignore = E305,E402,E721,F401,F403,F405,F821,F841,F999