# deeperface.py
from deepface import DeepFace
from deepface.detectors import FaceDetector, OpenCvWrapper
from deepface.extendedmodels import Emotion
import cv2
import deepface.commons.distance
import deepface.commons.functions
import numpy
import opennsfw2


class Emotion:
    # At class-body evaluation time the name Emotion still refers to the
    # imported deepface.extendedmodels.Emotion module (this class only
    # shadows it afterwards), so the comprehension reads deepface's labels.
    labels = [emotion.capitalize() for emotion in Emotion.labels]
    model = DeepFace.build_model('Emotion')


class NSFW:
    labels = [False, True]  # index 0: safe for work, index 1: NSFW
    model = opennsfw2.make_open_nsfw_model()
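
# A minimal sketch of how the wrappers above are consumed downstream; the
# label order comes from deepface's Emotion module (in the releases this
# file targets: angry, disgust, fear, happy, sad, surprise, neutral):
#
#     probabilities = Emotion.model.predict(batch).ravel()
#     label = Emotion.labels[numpy.argmax(probabilities)]
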
################################################################################
class Pixels(numpy.ndarray):
    # A thin ndarray view that knows how to read and write itself via OpenCV.
    @classmethod
    def read(cls, path):
        # cv2.imread returns None for an unreadable path, so this raises an
        # AttributeError rather than silently returning None.
        return cv2.imread(path).view(type=cls)

    def write(self, path):
        cv2.imwrite(path, self)
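
# Example round trip (hypothetical file names):
#
#     img = Pixels.read('input.jpg')  # BGR pixel array viewed as Pixels
#     img.write('copy.jpg')
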

class FaceImage(Pixels):
    # An extracted, aligned face. Methods take the instance itself as their
    # first argument, named face_img rather than the conventional self.
    def analyze(face_img):
        # Preprocess the way deepface's emotion model expects: grayscale,
        # 48x48, with a leading batch dimension.
        face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
        face_img = cv2.resize(face_img, (48, 48))
        face_img = numpy.expand_dims(face_img, axis=0)
        predictions = Emotion.model.predict(face_img).ravel()
        return Emotion.labels[numpy.argmax(predictions)]

    def represent(face_img):
        # The face is already extracted and aligned, so detection is skipped.
        face_img = numpy.expand_dims(face_img, axis=0)
        return DeepFace.represent(face_img,
                                  'VGG-Face',
                                  detector_backend='skip')[0]['embedding']
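
# Sketch of the two methods in use, assuming face_img was produced by
# Image.extract_face below:
#
#     face_img.analyze()    # e.g. 'Happy'
#     face_img.represent()  # VGG-Face embedding (a list of floats)
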

class Image(Pixels):
    # A full photo. As in FaceImage, methods take the instance itself as
    # their first argument, named img.
    def annotate(img, face, emotion):
        # Draw the four corners of the face box as elliptical arcs on a
        # single-channel mask (annotation pixels have the value 1).
        face_annotation = numpy.zeros_like(img)
        face_annotation = cv2.cvtColor(face_annotation,
                                       cv2.COLOR_BGR2GRAY).view(type=Pixels)
        x, y, w, h = face
        axes = (int(0.1 * w), int(0.1 * h))
        cv2.ellipse(face_annotation, (x + axes[0], y + axes[1]), axes, 180, 0,
                    90, (1, 0, 0), 2)
        cv2.ellipse(face_annotation, (x + w - axes[0], y + axes[1]), axes, 270,
                    0, 90, (1, 0, 0), 2)
        cv2.ellipse(face_annotation, (x + axes[0], y + h - axes[1]), axes, 90,
                    0, 90, (1, 0, 0), 2)
        cv2.ellipse(face_annotation, (x + w - axes[0], y + h - axes[1]), axes,
                    0, 0, 90, (1, 0, 0), 2)
        # Render the emotion label on its own mask, shrinking the font until
        # the text fits within 60% of the face width.
        emotion_annotation = numpy.zeros_like(img)
        emotion_annotation = cv2.cvtColor(emotion_annotation,
                                          cv2.COLOR_BGR2GRAY).view(type=Pixels)
        for fontScale in numpy.arange(10, 0, -0.1):
            textSize, _ = cv2.getTextSize(emotion, cv2.FONT_HERSHEY_SIMPLEX,
                                          fontScale, 2)
            if textSize[0] <= int(0.6 * w):
                break
        cv2.putText(emotion_annotation, emotion,
                    (int(x + (w - textSize[0]) / 2), int(y + textSize[1] / 2)),
                    cv2.FONT_HERSHEY_SIMPLEX, fontScale, (1, 0, 0), 2)
        return [(face_annotation, 'face'), (emotion_annotation, 'emotion')]
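
    # The masks mark annotation pixels with the value 1, so a caller can
    # overlay them in any colour, e.g. (hypothetical colour choice):
    #
    #     for annotation, name in img.annotate(face, 'Happy'):
    #         img[annotation == 1] = (0, 0, 255)  # red in BGR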

    def detect_faces(img):
        # Run deepface's OpenCV haar-cascade backend and normalize each
        # detected region to a plain (x, y, w, h) tuple of ints.
        face_detector = FaceDetector.build_model('opencv')
        faces = []
        for _, face, _ in FaceDetector.detect_faces(face_detector, 'opencv',
                                                    img, False):
            face = (int(face[0]), int(face[1]), int(face[2]), int(face[3]))
            faces.append(face)
        return faces
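
    # For example (hypothetical file name):
    #
    #     faces = Image.read('group.jpg').detect_faces()
    #     # -> [(x, y, w, h), ...], one tuple per detected face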

    def extract_face(img, face):
        face_detector = FaceDetector.build_model('opencv')
        x, y, w, h = face
        img = img[y:y + h, x:x + w]
        # Rotate the crop so the eyes are level, then let deepface resize and
        # normalize it (pixel values in [0, 1]) to the VGG-Face input shape.
        img = OpenCvWrapper.align_face(face_detector['eye_detector'], img)
        target_size = deepface.commons.functions.find_target_size('VGG-Face')
        face_img, _, _ = deepface.commons.functions.extract_faces(
            img, target_size, 'skip')[0]
        face_img = numpy.squeeze(face_img, axis=0)
        return face_img.view(type=FaceImage)
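
    # Each region from detect_faces can be fed straight back in:
    #
    #     face_img = img.extract_face(faces[0])  # an aligned FaceImage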

    def nsfw(img):
        # Approximates the Yahoo OpenNSFW preprocessing: a 224x224 BGR input
        # with VGG-style channel-mean subtraction.
        img = cv2.resize(img, (224, 224))
        img = img - numpy.array([104, 117, 123], numpy.float32)
        img = numpy.expand_dims(img, axis=0)
        predictions = NSFW.model.predict(img).ravel()
        return NSFW.labels[numpy.argmax(predictions)]
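
    # For example (hypothetical file name):
    #
    #     if Image.read('upload.jpg').nsfw():
    #         ...  # e.g. reject the upload, or pixelate it (below)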

    def pixelate(img):
        h, w, _ = img.shape
        # Downsample to a 16x16 mosaic, then scale back up with
        # nearest-neighbour interpolation to keep the blocky look.
        img = cv2.resize(img, (16, 16))
        return cv2.resize(img, (w, h),
                          interpolation=cv2.INTER_NEAREST).view(type=Pixels)
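
# Since ndarray slices keep their subclass, a detected face can be pixelated
# in place, e.g.:
#
#     for x, y, w, h in img.detect_faces():
#         img[y:y + h, x:x + w] = img[y:y + h, x:x + w].pixelate()
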
################################################################################
class Metadata(dict):
    def __init__(self, img):
        # Map each detected face region (x, y, w, h) to its emotion label
        # and its VGG-Face embedding.
        metadata = {}
        for face in img.detect_faces():
            face_img = img.extract_face(face)
            emotion = face_img.analyze()
            representation = face_img.represent()
            metadata[face] = {
                'emotion': emotion,
                'representation': representation
            }
        super(Metadata, self).__init__(metadata)

    def emotions(self):
        return [value['emotion'] for value in self.values()]

    def representations(self):
        return [value['representation'] for value in self.values()]
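
# For example (hypothetical file name):
#
#     metadata = Metadata(Image.read('photo.jpg'))
#     metadata.emotions()         # e.g. ['Happy', 'Neutral']
#     metadata.representations()  # one embedding per detected face
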
################################################################################
def verify(source_representations, test_representations):
    # Two sets of faces match if any pair of embeddings falls within
    # deepface's cosine-distance threshold for VGG-Face.
    for source_representation in source_representations:
        for test_representation in test_representations:
            if deepface.commons.distance.findCosineDistance(
                    source_representation, test_representation
            ) < deepface.commons.distance.findThreshold('VGG-Face', 'cosine'):
                return True
    return False
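

# A minimal end-to-end sketch of the module, assuming two local images named
# 'source.jpg' and 'test.jpg' (hypothetical names):
if __name__ == '__main__':
    source_metadata = Metadata(Image.read('source.jpg'))
    test_metadata = Metadata(Image.read('test.jpg'))
    print('emotions:', source_metadata.emotions())
    print('same person:', verify(source_metadata.representations(),
                                 test_metadata.representations()))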