| | import cv2 |
| | import matplotlib.pyplot as plt |
| | import streamlit as st |
| | from deepface import DeepFace |
| | import mediapipe |
| | import os |
| | import tempfile |
| |
|
# Face-detector backends DeepFace can use to locate faces.
backends = [
    "opencv", "ssd", "dlib", "mtcnn", "fastmtcnn",
    "retinaface", "mediapipe", "yolov8", "yunet", "centerface",
]

# Distance metrics for comparing face embeddings.
metrics = ["cosine", "euclidean", "euclidean_l2"]

# Recognition models DeepFace can load for embedding extraction.
models = [
    "VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace",
    "DeepID", "ArcFace", "Dlib", "SFace", "GhostFaceNet",
]
| |
|
def _draw_detections(image, result):
    """Draw a grey box around each confidently-detected face, in place.

    Args:
        image: RGB numpy array to annotate (mutated in place).
        result: mediapipe FaceDetection result for this image.
    """
    if result.detections is None:
        return
    height, width = image.shape[:2]
    for face in result.detections:
        # second-stage filter: only draw boxes the detector is sure about
        if face.score[0] > 0.80:
            box = face.location_data.relative_bounding_box
            x = int(box.xmin * width)
            y = int(box.ymin * height)
            w = int(box.width * width)
            h = int(box.height * height)
            cv2.rectangle(image, (x, y), (x + w, y + h),
                          color=(126, 133, 128), thickness=10)


def verify(img1, img2, model_name, backend, metric):
    """Compare two uploaded face images and report whether they match.

    Saves the uploads to temp files, shows both images with detected
    faces outlined, runs DeepFace verification, and writes the verdict
    to the Streamlit page.

    Args:
        img1, img2: Streamlit UploadedFile objects (jpg/png).
        model_name: DeepFace recognition model (one of `models`).
        backend: face-detector backend (one of `backends`).
        metric: embedding distance metric (one of `metrics`).
    """
    temp_paths = []
    try:
        # Persist uploads to disk so cv2/DeepFace can read them.
        for uploaded in (img1, img2):
            with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp:
                tmp.write(uploaded.read())
                temp_paths.append(tmp.name)

        # cv2 loads BGR; convert to RGB for mediapipe and st.image.
        img1p = cv2.cvtColor(cv2.imread(temp_paths[0]), cv2.COLOR_BGR2RGB)
        img2p = cv2.cvtColor(cv2.imread(temp_paths[1]), cv2.COLOR_BGR2RGB)

        # Context manager ensures mediapipe releases its graph resources.
        with mediapipe.solutions.face_detection.FaceDetection(
                min_detection_confidence=0.6) as face_detector:
            result1 = face_detector.process(img1p)
            result2 = face_detector.process(img2p)

        _draw_detections(img1p, result1)
        _draw_detections(img2p, result2)

        st.image([img1p, img2p], caption=["Image 1", "Image 2"], width=200)

        # BUG FIX: verify against the saved original files, not the RGB
        # arrays with rectangles drawn over the faces — the boxes corrupt
        # the pixels being matched, and DeepFace expects BGR when given
        # arrays directly.
        face = DeepFace.verify(
            temp_paths[0], temp_paths[1],
            model_name=model_name,
            detector_backend=backend,
            distance_metric=metric,
        )

        st.write("Matched" if face["verified"] else "Not Matched")
    finally:
        # BUG FIX: delete=False temp files were never removed, leaking a
        # pair of files on every verification.
        for path in temp_paths:
            try:
                os.remove(path)
            except OSError:
                pass
| | |
def main():
    """Streamlit entry point: render the sidebar and the selected tool."""
    st.title("Face Verification App")
    tab_selection = st.sidebar.selectbox(
        "Select Functionality",
        ["Face Verification", "Face Recognition", "Celebrity Lookalike",
         "Age and Emotions Detection"],
    )

    if tab_selection == "Face Verification":
        st.header("Face Verification")
        model_name = st.selectbox("Select Model", models)
        backend = st.selectbox("Select Backend", backends)
        metric = st.selectbox("Select Metric", metrics)

        uploaded_img1 = st.file_uploader("Upload Image 1", type=["jpg", "png"])
        uploaded_img2 = st.file_uploader("Upload Image 2", type=["jpg", "png"])

        # Only offer the button once both files are present.
        if uploaded_img1 and uploaded_img2:
            if st.button("Verify Faces"):
                verify(uploaded_img1, uploaded_img2, model_name, backend, metric)
    else:
        # BUG FIX: the other sidebar options previously rendered a blank
        # page with no feedback; tell the user they are not implemented.
        st.header(tab_selection)
        st.info(f"{tab_selection} is not implemented yet.")
| |
|
| | |
# Run the app when executed directly (e.g. `streamlit run <this file>`).
if __name__ == "__main__":
    main()
| |
|