UmairMirza committed on
Commit 43503a9
1 Parent(s): 5bd79e3

Update app.py

Files changed (1)
  1. app.py +53 -28
app.py CHANGED
@@ -10,7 +10,7 @@ images = []
 personNames = []
 myList = os.listdir(path)
 unkownEncodings=[]
-names=[]
+
 print(myList)
 for cu_img in myList:
     current_Img = cv2.imread(f'{path}/{cu_img}')
@@ -32,36 +32,61 @@ def faceEncodings(images):
 encodeListKnown = faceEncodings(images)
 print('All Encodings Complete!!!')
 
-def Attandance(video):
-    cap = cv2.VideoCapture(video)
-    index=1
-    while True:
-        try:
-            ret, frame = cap.read()
-            #faces = cv2.resize(frame, (0, 0), None, 0.25, 0.25)
-            faces = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-
-            facesCurrentFrame = face_recognition.face_locations(faces)
-            encodesCurrentFrame = face_recognition.face_encodings(faces, facesCurrentFrame)
-
-            for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
-                matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
-                faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
-                # print(faceDis)
-                matchIndex = np.argmin(faceDis)
-
-                if matches[matchIndex]:
-                    name = personNames[matchIndex].upper()
-                    if names.count(name) == 0:
-                        names.append(name)
-
-            cv2.waitKey(1)
-        except:
-            break
-    return ' '.join(names)
+def Attandance(text,video,image):
+    names=[]
+    if video is not None:
+        cap = cv2.VideoCapture(video)
+        index=1
+        while True:
+            try:
+                ret, frame = cap.read()
+                #faces = cv2.resize(frame, (0, 0), None, 0.25, 0.25)
+                faces = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+                facesCurrentFrame = face_recognition.face_locations(faces)
+                encodesCurrentFrame = face_recognition.face_encodings(faces, facesCurrentFrame)
+
+                for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
+                    matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
+                    faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
+                    # print(faceDis)
+                    matchIndex = np.argmin(faceDis)
+
+                    if matches[matchIndex]:
+                        name = personNames[matchIndex].upper()
+                        if names.count(name) == 0:
+                            names.append(name)
+
+                cv2.waitKey(1)
+            except:
+                break
+        return ' '.join(names)
+    else:
+        try:
+            #faces = cv2.resize(frame, (0, 0), None, 0.25, 0.25)
+            faces = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+            facesCurrentFrame = face_recognition.face_locations(faces)
+            encodesCurrentFrame = face_recognition.face_encodings(faces, facesCurrentFrame)
+
+            for encodeFace, faceLoc in zip(encodesCurrentFrame, facesCurrentFrame):
+                matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
+                faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
+                # print(faceDis)
+                matchIndex = np.argmin(faceDis)
+
+                if matches[matchIndex]:
+                    name = personNames[matchIndex].upper()
+                    if names.count(name) == 0:
+                        names.append(name)
+
+            cv2.waitKey(1)
+        except:
+            break
+        return ' '.join(names)
 
 demo=gr.Interface(fn=Attandance,
-                  inputs="video",
+                  inputs=["text","video","image"],
                   outputs="text",
                   title="Face Attendance",
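
For reference, a minimal sketch (not part of this commit) of how the matching block, which the new Attandance duplicates across the video and image branches, could be shared by one helper. The names match_names and attendance are hypothetical; known_encodings and known_names stand in for encodeListKnown and personNames in app.py, and face_recognition, opencv-python and numpy are assumed to be installed.

import cv2
import numpy as np
import face_recognition


def match_names(frame_bgr, known_encodings, known_names, names):
    # Mirror the commit's handling: treat the input as BGR and convert to RGB
    # before locating and encoding faces.
    rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    locations = face_recognition.face_locations(rgb)
    encodings = face_recognition.face_encodings(rgb, locations)
    for encoding in encodings:
        matches = face_recognition.compare_faces(known_encodings, encoding)
        distances = face_recognition.face_distance(known_encodings, encoding)
        best = int(np.argmin(distances))
        if matches[best]:
            name = known_names[best].upper()
            if name not in names:   # same de-duplication as names.count(name) == 0
                names.append(name)
    return names


def attendance(text, video, image, known_encodings, known_names):
    # Same priority as the committed Attandance: video first, otherwise the image;
    # the text input is accepted but unused there as well.
    names = []
    if video is not None:
        cap = cv2.VideoCapture(video)
        while True:
            ret, frame = cap.read()
            if not ret:             # stop at end of stream instead of a bare except
                break
            match_names(frame, known_encodings, known_names, names)
        cap.release()
    elif image is not None:
        match_names(image, known_encodings, known_names, names)
    return ' '.join(names)

Sharing the block this way would also remove the bare except around the image path, where the committed break has no enclosing loop.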