import gradio as gr
from deepface import DeepFace
from transformers import pipeline
import cv2
import moviepy.editor as mp

# Load the sentiment-analysis pipeline once at startup instead of on every request
sentiment_classifier = pipeline("sentiment-analysis")


def analyze_text(text):
    return sentiment_classifier(text)[0]['label']


def analyze_video_emotion(video_path):
    try:
        # gr.Video passes the uploaded clip as a file path, so MoviePy can read it directly
        video = mp.VideoFileClip(video_path)
        emotions = []
        for i, frame in enumerate(video.iter_frames()):
            if i >= 60:  # Limit to the first 60 frames to keep inference time reasonable
                break
            try:
                # MoviePy yields RGB frames; DeepFace expects BGR (OpenCV convention)
                frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                result = DeepFace.analyze(frame_bgr, actions=['emotion'], enforce_detection=False)
                # DeepFace >= 0.0.79 returns a list of dicts; older versions return a single dict
                if isinstance(result, list):
                    result = result[0]
                emotions.append(result['dominant_emotion'])
            except Exception as e:
                print("Error analyzing frame:", e)
        video.close()
        if emotions:
            # Return the most common emotion across the analyzed frames
            return max(set(emotions), key=emotions.count)
        else:
            return "No face detected"
    except Exception as e:
        print("Error processing video:", e)
        return "Error processing video file"


def process_all(text_input, video_input):
    text_result = analyze_text(text_input)
    video_result = analyze_video_emotion(video_input)
    return f"Text Sentiment: {text_result}\nFacial Emotion: {video_result}"


iface = gr.Interface(
    fn=process_all,
    inputs=[
        gr.Textbox(label="Enter Social Media Text"),
        gr.Video(label="Upload a Video Clip")
    ],
    outputs="text",
    title="Emotion & Sentiment Decoder",
    description="Analyzes social media text & facial expressions from video."
)

iface.launch()
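
# --- Dependency note (a minimal sketch, not taken from the original Space files) ---
# Running this app as a Hugging Face Space would need a requirements.txt roughly
# along these lines; the exact package set and pins are assumptions based on the
# imports above:
#   gradio
#   deepface
#   transformers
#   torch
#   moviepy<2.0              # "moviepy.editor" was removed in MoviePy 2.x
#   opencv-python-headless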