Mehulgarg committed
Commit
a021008
1 Parent(s): e673df4

Update app.py

Files changed (1)
  1. app.py +59 -50
app.py CHANGED
@@ -1,25 +1,32 @@
- import os
- import cv2
- import numpy as np
+ from keras.models import load_model
  from PIL import Image
- import face_recognition
- import streamlit as st
+ import numpy as np
+ import cv2
  import requests
+ import face_recognition
+ import os
+ from datetime import datetime

- # Set up Streamlit
- st.title("Face Recognition App")
+ #the following are to do with this interactive notebook code
+ from matplotlib import pyplot as plt # this lets you draw inline pictures in the notebooks
+ import pylab # this allows you to control figure size
+ pylab.rcParams['figure.figsize'] = (10.0, 8.0) # this controls figure size in the notebook
+
+ import io
+ import streamlit as st
+ bytes_data=None

- # Load images from the current directory
  Images = []
  classnames = []
  myList = os.listdir()
+ #st.write(myList)
  for cls in myList:
-     if os.path.splitext(cls)[1] == ".jpg":
+     if os.path.splitext(cls)[1] == ".jpg" :
          curImg = cv2.imread(f'{cls}')
          Images.append(curImg)
          classnames.append(os.path.splitext(cls)[0])
+ st.write(classnames)

- # Function to find face encodings
  def findEncodings(Images):
      encodeList = []
      for img in Images:
@@ -31,47 +38,49 @@ def findEncodings(Images):
  encodeListknown = findEncodings(Images)
  st.write('Encoding Complete')

- # Take a picture using Streamlit camera input
- img_file_buffer = st.camera_input("Take a picture")
-
- # Check if an image was taken
+ img_file_buffer=st.camera_input("Take a picture")
  if img_file_buffer is not None:
-     test_image = Image.open(img_file_buffer)
-     st.image(test_image, use_column_width=True)
-
-     # Convert the image to numpy array
-     image = np.asarray(test_image)
-
-     # Resize image
-     imgS = cv2.resize(image, (0, 0), None, 0.25, 0.25)
-     imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
-
-     # Find face locations and encodings
-     facesCurFrame = face_recognition.face_locations(imgS)
-     encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
-
-     for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
-         matches = face_recognition.compare_faces(encodeListknown, encodeFace)
-         faceDis = face_recognition.face_distance(encodeListknown, encodeFace)
-         matchIndex = np.argmin(faceDis)
-
-         if matches[matchIndex]:
-             name = classnames[matchIndex].upper()
-             st.write(name)
-             y1, x2, y2, x1 = faceLoc
-             y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
-             cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
-             cv2.rectangle(image, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
-             cv2.putText(image, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
+
+     test_image = Image.open(img_file_buffer)
+     st.image(test_image, use_column_width=True)
+
+     image = np.asarray(test_image)
+
+     #########################
+     imgS = cv2.resize(image,(0,0),None,0.25,0.25)
+     imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
+     facesCurFrame = face_recognition.face_locations(imgS)
+     encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)

-             # Update data using requests
-             url = "https://rgiattendance.000webhostapp.com/update.php"
-             data1 = {'name': name}
-             response = requests.post(url, data=data1)
+     for encodeFace,faceLoc in zip(encodesCurFrame,facesCurFrame):
+         matches = face_recognition.compare_faces(encodeListknown,encodeFace)
+         faceDis = face_recognition.face_distance(encodeListknown,encodeFace)
+         #print(faceDis)
+         matchIndex = np.argmin(faceDis)

-             if response.status_code == 200:
-                 st.write("Data updated on: " + url)
-             else:
-                 st.write("Data NOT updated " + url)
+         if matches[matchIndex]:
+             name = classnames[matchIndex]
+             st.write(name)
+             y1, x2, y2, x1 = faceLoc
+             y1, x2, y2, x1 = y1*4,x2*4,y2*4,x1*4
+             cv2.rectangle(image,(x1,y1),(x2,y2),(0,255,0),2)
+             cv2.rectangle(image,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
+             cv2.putText(image,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX,1,(255, 255, 255),2)

-     st.image(image)
+             ##############
+             url = "https://kiwi-whispering-plier.glitch.me/update"
+
+             data = {
+                 'name': name,
+             }
+             response = requests.get(url, params=data)
+
+             if response.status_code == 200 :
+                 st.write(" data updated on : https://kiwi-whispering-plier.glitch.me" )
+             else : st.write("data not updated ")
+
+             ##############################
+
+     st.image(image)
+     if bytes_data is None:
+         st.stop()
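
Note: the body of findEncodings (old lines 26-30 / new lines 33-37) falls between the two hunks and is not shown in this diff. For context, a minimal sketch of what such a helper usually looks like with the face_recognition API; the BGR-to-RGB conversion and the [0] indexing on the first detected face are assumptions based on the common pattern, not taken from this commit:

# Sketch only: assumed shape of the elided findEncodings body (not from this commit).
def findEncodings(Images):
    encodeList = []
    for img in Images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)        # face_recognition expects RGB
        encode = face_recognition.face_encodings(img)[0]  # encoding of the first face found
        encodeList.append(encode)
    return encodeList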
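
The commit also switches the attendance update from a POST to the 000webhostapp PHP script to a GET with a name query parameter against the Glitch route shown above. A standalone sketch of that call, with a timeout and exception handling added purely for illustration (only the URL and the 'name' parameter come from the diff; the helper name is hypothetical):

# Illustrative wrapper around the new update request; timeout and error handling are assumptions.
import requests

UPDATE_URL = "https://kiwi-whispering-plier.glitch.me/update"

def mark_attendance(name):
    try:
        response = requests.get(UPDATE_URL, params={'name': name}, timeout=5)
        return response.status_code == 200
    except requests.RequestException:
        return False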