LovnishVerma committed on
Commit
e89a8b7
·
verified ·
1 Parent(s): 1a30561

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -12
app.py CHANGED
@@ -6,10 +6,6 @@ import os
6
  from keras.models import load_model
7
  from PIL import Image
8
  import tempfile
9
- import pyttsx3 # Import pyttsx3 for text-to-speech
10
-
11
- # Initialize the pyttsx3 engine
12
- engine = pyttsx3.init()
13
 
14
  # Larger title
15
  st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
@@ -39,7 +35,7 @@ face_recognizer = cv2.face.LBPHFaceRecognizer_create()
39
  def load_known_faces():
40
  folder_path = "known_faces" # Place your folder with known faces here
41
  for image_name in os.listdir(folder_path):
42
- if image_name.endswith(('.jpg', '.jpeg', '.png')): # Read only image files
43
  image_path = os.path.join(folder_path, image_name)
44
  image = cv2.imread(image_path)
45
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
@@ -88,10 +84,6 @@ def process_frame(frame):
88
  # Format the result text as "Name is feeling Emotion"
89
  result_text = f"{name} is feeling {emotion}"
90
 
91
- # Announce the detected emotion with voice output
92
- engine.say(result_text)
93
- engine.runAndWait()
94
-
95
  # Draw bounding box and label on the frame
96
  cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
97
  cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
@@ -120,9 +112,15 @@ def video_feed(video_source):
120
  upload_choice = st.sidebar.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
121
 
122
  if upload_choice == "Camera":
123
- # Access camera
124
- video_source = cv2.VideoCapture(0)
125
- video_feed(video_source)
 
 
 
 
 
 
126
 
127
  elif upload_choice == "Upload Image":
128
  uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])
 
6
  from keras.models import load_model
7
  from PIL import Image
8
  import tempfile
 
 
 
 
9
 
10
  # Larger title
11
  st.markdown("<h1 style='text-align: center;'>Emotion Detection with Face Recognition</h1>", unsafe_allow_html=True)
 
35
  def load_known_faces():
36
  folder_path = "known_faces" # Place your folder with known faces here
37
  for image_name in os.listdir(folder_path):
38
+ if image_name.endswith(('.jpg', '.jpeg', '.png')):
39
  image_path = os.path.join(folder_path, image_name)
40
  image = cv2.imread(image_path)
41
  gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
 
84
  # Format the result text as "Name is feeling Emotion"
85
  result_text = f"{name} is feeling {emotion}"
86
 
 
 
 
 
87
  # Draw bounding box and label on the frame
88
  cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
89
  cv2.putText(frame, result_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
 
112
  upload_choice = st.sidebar.radio("Choose input source", ["Upload Image", "Upload Video", "Camera"])
113
 
114
  if upload_choice == "Camera":
115
+ # Use Streamlit's built-in camera input widget for capturing images from the webcam
116
+ image = st.camera_input("Take a picture")
117
+
118
+ if image is not None:
119
+ # Convert the image to a numpy array
120
+ frame = np.array(Image.open(image))
121
+ frame, result_text = process_frame(frame)
122
+ st.image(frame, caption='Processed Image', use_column_width=True)
123
+ st.markdown(f"<h3 style='text-align: center;'>{result_text}</h3>", unsafe_allow_html=True)
124
 
125
  elif upload_choice == "Upload Image":
126
  uploaded_image = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "gif"])