Mrinal007 committed
Commit 88fe2a6 · verified · 1 Parent(s): 6012d32

Update app.py

Files changed (1)
  1. app.py +7 -35
app.py CHANGED
@@ -14,7 +14,7 @@ logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
 try:
-    from microexpression import track_microexpressions, get_lip_engagement
+    from microexpression import track_microexpressions
     import mediapipe as mp
 except ImportError as e:
     logger.error(f"Failed to import required modules: {e}")
@@ -154,7 +154,7 @@ class VideoAnalyzer:
             logger.error(f"Error validating video file: {e}")
             raise
 
-    def analyze_video(self, video_file) -> Tuple[str, Dict, Dict]:
+    def analyze_video(self, video_file) -> Tuple[str, Dict]:
         """
         Main video analysis function.
 
@@ -162,7 +162,7 @@ class VideoAnalyzer:
             video_file: Input video file
 
         Returns:
-            Tuple of (result_json, lip_summary, micro_summary)
+            Tuple of (result_json, micro_summary)
         """
         video_path = None
         try:
@@ -186,7 +186,6 @@ class VideoAnalyzer:
             processed_frames = 0
             eye_away_count = 0
             head_turn_count = 0
-            lip_engagement_predictions = []
             emotion_prob_list = []
             stress_score_list = []
             stress_label_list = []
@@ -207,16 +206,6 @@ class VideoAnalyzer:
                     frame, self.face_mesh, self.calibration_ref
                 )
 
-                # Lip engagement detection
-                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-                results = self.face_mesh.process(frame_rgb)
-
-                lip_engagement = "No Face"
-                if results.multi_face_landmarks:
-                    landmarks = [(lm.x, lm.y) for lm in results.multi_face_landmarks[0].landmark]
-                    lip_engagement = get_lip_engagement(landmarks)
-                lip_engagement_predictions.append(lip_engagement)
-
                 # Update microexpression counts
                 if micro.get("eye_away", False):
                     eye_away_count += 1
@@ -241,7 +230,6 @@ class VideoAnalyzer:
             cap.release()
 
             # Generate summaries
-            lip_summary = self._generate_lip_summary(lip_engagement_predictions)
             micro_summary = self._generate_micro_summary(eye_away_count, head_turn_count, processed_frames)
             emotion_summary = self._generate_emotion_summary(emotion_prob_list)
             stress_summary = self._generate_stress_summary(stress_score_list, stress_label_list)
@@ -254,16 +242,14 @@ class VideoAnalyzer:
                     "processed_frames": processed_frames,
                     "fps": fps
                 },
-                "engagement": lip_summary.get("majority_label", "Unknown"),
                 "stress": stress_summary,
                 "emotion_probabilities": emotion_summary,
                 "confidence_metrics": {
-                    "frames_with_face": len([x for x in lip_engagement_predictions if x != "No Face"]),
                     "total_processed": processed_frames
                 }
             }
 
-            return json.dumps(result, indent=2), lip_summary, micro_summary
+            return json.dumps(result, indent=2), micro_summary
 
         except Exception as e:
             logger.error(f"Error in video analysis: {e}")
@@ -271,7 +257,7 @@ class VideoAnalyzer:
                 "error": str(e),
                 "status": "failed"
             }
-            return json.dumps(error_result, indent=2), {}, {}
+            return json.dumps(error_result, indent=2), {}
 
         finally:
             # Clean up temporary files
@@ -283,20 +269,8 @@ class VideoAnalyzer:
 
     def _generate_lip_summary(self, predictions: List[str]) -> Dict:
         """Generate lip engagement summary."""
-        lip_counts = Counter(predictions)
-        lip_counts_no_face = Counter({k: v for k, v in lip_counts.items() if k != "No Face"})
-        lip_total = sum(lip_counts_no_face.values())
-
-        if lip_total > 0:
-            lip_summary = {
-                label: f"{lip_counts_no_face.get(label, 0)} frames ({(lip_counts_no_face.get(label, 0)/lip_total)*100:.1f}%)"
-                for label in ["Engaged", "Partially Engaged", "Not Engaged"]
-            }
-            lip_summary["majority_label"] = lip_counts_no_face.most_common(1)[0][0]
-        else:
-            lip_summary = {"No valid predictions": 0, "majority_label": "No Face"}
-
-        return lip_summary
+        # This method is no longer used but kept for backward compatibility
+        return {}
 
     def _generate_micro_summary(self, eye_away: int, head_turn: int, total_frames: int) -> Dict:
         """Generate microexpression summary."""
@@ -340,13 +314,11 @@ interface = gr.Interface(
     inputs=gr.Video(label="Upload or record a 15s video (mp4/avi/mov)"),
     outputs=[
         gr.Textbox(label="Analysis Results (JSON)", lines=10),
-        gr.JSON(label="Lip Engagement Summary"),
         gr.JSON(label="Microexpression Summary")
     ],
    title="🎥 Advanced Video Analysis for SafeSpace",
    description="""
    Upload or record a video to analyze:
-    - **Engagement Level**: Based on lip movement and facial expressions
    - **Emotion Recognition**: Probability distribution across 7 emotions
    - **Stress Detection**: Weighted stress score based on emotional state
    - **Microexpressions**: Eye movement and head turn analysis
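
After this commit, `analyze_video` returns two values, `(result_json, micro_summary)`, and the Gradio interface declares exactly two output components to match; the lip engagement output and its description bullet are gone. A minimal sketch of the resulting wiring, with a stub in place of the real `VideoAnalyzer` pipeline (the stub body and summary values are illustrative assumptions, not the app's actual logic):

import json
import gradio as gr

def analyze_video(video_file):
    # Stub standing in for VideoAnalyzer.analyze_video. The contract after
    # this commit is two return values: a JSON string and a micro-summary dict.
    micro_summary = {"eye_away": "0 frames (0.0%)", "head_turn": "0 frames (0.0%)"}  # assumed shape
    result = {"status": "ok", "stress": {}, "emotion_probabilities": {}}
    return json.dumps(result, indent=2), micro_summary

# One output component per returned value. Keeping the old third component
# (the lip engagement gr.JSON) would make Gradio report an output-count
# mismatch now that only two values come back.
interface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload or record a 15s video (mp4/avi/mov)"),
    outputs=[
        gr.Textbox(label="Analysis Results (JSON)", lines=10),
        gr.JSON(label="Microexpression Summary"),
    ],
)

if __name__ == "__main__":
    interface.launch()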