23A475R committed on
Commit
082b842
1 Parent(s): f771ff2

Update app.py

Files changed (1):
  1. app.py +39 -21
app.py CHANGED
@@ -76,8 +76,8 @@
 # demo.launch()
 
 ######################################################################################################################################################
-
 import gradio as gr
+import os
 import cv2
 import numpy as np
 import imutils
@@ -91,6 +91,7 @@ face_detection = cv2.CascadeClassifier(detection_model_path)
 emotion_classifier = load_model(emotion_model_path, compile=False)
 EMOTIONS = ['neutral', 'happiness', 'surprise', 'sadness', 'anger', 'disgust', 'fear', 'contempt', 'unknown']
 
+# Define a function to process each frame for emotion prediction
 def predict_emotion(frame):
     frame = imutils.resize(frame, width=300)
     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
@@ -112,23 +113,40 @@ def predict_emotion(frame):
                     (238, 164, 64), 2)
     return frame
 
-# Define Gradio interface
-inputs = [
-    gr.inputs.Image(type="numpy", label="Upload Image"),
-    gr.inputs.Video(type="numpy", label="Upload Video")  # Allow for video input
-]
-outputs = [
-    gr.outputs.Video(type="numpy", label="Processed Video"),  # Allow for video output
-    gr.Image()
-]
-examples = [
-    "images/chandler.jpeg",
-    "videos/input_video.mp4"
-]
-
-title = "Emotion Recognition"
-description = "Upload an image or a video, and the model will detect emotions in faces and overlay them on the output."
-
-iface = gr.Interface(fn=predict_emotion, inputs=inputs, outputs=outputs,
-                     examples=examples, title=title, description=description)
-iface.launch()
+# Define a function to process video input and output
+def process_video(input_video_path, output_video_path):
+    # Open the video capture
+    cap = cv2.VideoCapture(input_video_path)
+    # Get video properties (dimensions, frame rate)
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    # Define video writer for output
+    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'XVID'), fps, (width, height))
+
+    # Process each frame in the video
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+        frame_with_emotion = predict_emotion(frame)
+        out.write(frame_with_emotion)
+
+    # Release video capture and writer
+    cap.release()
+    out.release()
+
+# Define the Gradio interface
+demo = gr.Interface(
+    fn=process_video,
+    inputs=["video", "file"],  # Allow video input from webcam or file
+    outputs="video",  # Output video with emotion overlay
+    capture_session=True,  # Maintain capture session for video input
+    title="Emotion Detection in Video",
+    description="Upload a video file or use your webcam to detect emotions in real-time.",
+    theme="huggingface",
+)
+
+# Launch the Gradio interface
+if __name__ == "__main__":
+    demo.launch()
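
Note: as committed, `process_video` writes annotated frames to `output_video_path` but returns nothing, while `outputs="video"` expects the function to return the processed file; `predict_emotion` also resizes each frame to width 300, so the written frames will not match the `VideoWriter` dimensions. A minimal sketch of a variant that addresses both, assuming a single video-file input and a temporary output path (the name `process_video_fixed` and the `tempfile` usage are illustrative, not part of this commit):

import tempfile

def process_video_fixed(input_video_path):
    # Illustrative only: write to a temp file and return its path so the
    # Gradio "video" output component has a file to display.
    output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    cap = cv2.VideoCapture(input_video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # predict_emotion resizes its input to width=300, so resize the
        # annotated frame back to the original dimensions before writing.
        annotated = cv2.resize(predict_emotion(frame), (width, height))
        out.write(annotated)
    cap.release()
    out.release()
    return output_video_path

With this variant the interface would take one input and display the returned file, e.g. `gr.Interface(fn=process_video_fixed, inputs="video", outputs="video")`; `capture_session` and the string `theme` argument were dropped in later Gradio releases, so they may need to be removed depending on the installed version.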