VishyVish and beykun18 committed on
Commit
a8abf4f
0 Parent(s):

Duplicate from beykun18/Face-ID-Mediapipe


Co-authored-by: Bey Kun Chan <beykun18@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +31 -0
  2. README.md +13 -0
  3. app.py +81 -0
  4. requirements.txt +7 -0
.gitattributes ADDED
@@ -0,0 +1,31 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Face ID
+ emoji: ⚡
+ colorFrom: green
+ colorTo: red
+ sdk: gradio
+ sdk_version: 3.4.1
+ app_file: app.py
+ pinned: false
+ duplicated_from: beykun18/Face-ID-Mediapipe
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,81 @@
+ from sklearn.metrics.pairwise import cosine_similarity
+ from sentence_transformers import SentenceTransformer
+ import datasets
+ import gradio as gr
+ import numpy as np
+ import torchvision.transforms as transforms
+ import mediapipe as mp
+ import cv2
+
+ #model = SentenceTransformer('clip-ViT-B-16')
+ model = SentenceTransformer('clip-ViT-B-32')
+ dataset = datasets.load_dataset('brendenc/celeb-identities')
+
+ def predict(im1, im2):
+     # Convert the PIL Images to numpy arrays
+     im1 = np.array(im1)
+     im2 = np.array(im2)
+     face1 = im1.copy()
+     face2 = im2.copy()
+
+     img1_h, img1_w, _ = im1.shape
+     img2_h, img2_w, _ = im2.shape
+
+     # Locate faces with MediaPipe Face Mesh
+     mp_face_mesh = mp.solutions.face_mesh
+
+     with mp_face_mesh.FaceMesh(max_num_faces=1, refine_landmarks=True, min_detection_confidence=0.5, min_tracking_confidence=0.5) as face_mesh:
+         results1 = face_mesh.process(im1)
+         results2 = face_mesh.process(im2)
+
+     if results1.multi_face_landmarks:
+         for face_landmarks in results1.multi_face_landmarks:
+             # Face bounding box: landmarks 234/454 sit on the left/right face edges, 10/152 near the forehead top and chin
+             top_x = int(face_landmarks.landmark[234].x * img1_w)
+             top_y = int(face_landmarks.landmark[10].y * img1_h)
+             bottom_x = int(face_landmarks.landmark[454].x * img1_w)
+             bottom_y = int(face_landmarks.landmark[152].y * img1_h)
+
+             face1 = im1[top_y:bottom_y, top_x:bottom_x]
+             cv2.rectangle(im1, (top_x, top_y), (bottom_x, bottom_y), (0, 255, 0), 2)
+
+     if results2.multi_face_landmarks:
+         for face_landmarks in results2.multi_face_landmarks:
+             # Same bounding-box landmarks for the second image
+             top_x = int(face_landmarks.landmark[234].x * img2_w)
+             top_y = int(face_landmarks.landmark[10].y * img2_h)
+             bottom_x = int(face_landmarks.landmark[454].x * img2_w)
+             bottom_y = int(face_landmarks.landmark[152].y * img2_h)
+
+             face2 = im2[top_y:bottom_y, top_x:bottom_x]
+             cv2.rectangle(im2, (top_x, top_y), (bottom_x, bottom_y), (0, 255, 0), 2)
+
+     # Convert the numpy arrays back to PIL Images
+     face1 = transforms.ToPILImage()(face1)
+     im1 = transforms.ToPILImage()(im1)
+     face2 = transforms.ToPILImage()(face2)
+     im2 = transforms.ToPILImage()(im2)
+
+     # Embed both face crops with CLIP and compare them
+     embeddings = model.encode([face1, face2])
+     sim = cosine_similarity(embeddings)
+     sim = sim[0, 1]
+     if sim > 0.82:
+         return im1, im2, sim, "SAME PERSON, AUTHORIZE PAYMENT"
+     else:
+         return im1, im2, sim, "DIFFERENT PEOPLE, DON'T AUTHORIZE PAYMENT"
+
+
+ interface = gr.Interface(fn=predict,
+                          inputs=[gr.Image(value=dataset['train']['image'][10], type="pil", source="webcam"),
+                                  gr.Image(value=dataset['train']['image'][17], type="pil", source="webcam")],
+                          outputs=[gr.Image(),
+                                   gr.Image(),
+                                   gr.Number(label="Similarity"),
+                                   gr.Textbox(label="Message")],
+                          title='Face ID',
+                          description='This app uses face biometrics and a similarity score to function as a Face ID application. The similarity score ranges from -1 to 1.'
+                          )
+
+ interface.launch(debug=True)
+ #interface.launch(share=True)
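
A minimal sketch of the identity check above, isolated from the Gradio interface. It assumes two already-cropped face photos at the hypothetical paths face1.jpg and face2.jpg; the model name and the 0.82 threshold are the ones app.py uses:

    from PIL import Image
    from sentence_transformers import SentenceTransformer
    from sklearn.metrics.pairwise import cosine_similarity

    model = SentenceTransformer('clip-ViT-B-32')
    # encode() accepts PIL images for CLIP checkpoints and returns one vector per image
    embeddings = model.encode([Image.open('face1.jpg'), Image.open('face2.jpg')])
    # cosine_similarity returns a 2x2 matrix; the off-diagonal entry scores the pair
    sim = cosine_similarity(embeddings)[0, 1]
    print('same person' if sim > 0.82 else 'different people')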
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ scikit-learn
+ datasets
+ sentence_transformers
+ mediapipe
+ numpy
+ torchvision
+ opencv-python
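
Note that gradio itself is absent from requirements.txt because Spaces installs it from the sdk_version pin in README.md. As a quick local sanity check of the data the interface relies on, a sketch that loads the same dataset app.py uses and views the two images that pre-fill the webcam inputs:

    import datasets

    # Same dataset app.py loads; rows 10 and 17 are the default input values
    dataset = datasets.load_dataset('brendenc/celeb-identities')
    print(dataset['train'])
    dataset['train']['image'][10].show()
    dataset['train']['image'][17].show()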