import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch
from PIL import Image
import numpy as np
import librosa
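# Assumed Space dependencies (requirements.txt); the original does not list
# them, so treat this as a sketch:
#   gradio
#   transformers
#   torch
#   sentencepiece   # used by the T5 slow tokenizer
#   librosa
#   numpy
#   pillow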
# Load T5 model for simplification
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
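# Note: vanilla t5-base was not trained with a "simplify:" prefix, so output
# quality is limited. Illustrative only: a checkpoint fine-tuned on a text
# simplification corpus would fit better; "your-org/t5-simplify" below is a
# hypothetical model id, not a real checkpoint.
# tokenizer = AutoTokenizer.from_pretrained("your-org/t5-simplify")
# model = AutoModelForSeq2SeqLM.from_pretrained("your-org/t5-simplify")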
# Dummy function for stress detection from voice (replace with your actual model)
def detect_stress_from_voice(audio_path):
    # Stub for the demo: always reports "high" stress.
    # Replace with real stress-detection logic.
    return "high"
# Dummy function for stress detection from face image (replace with your actual model)
def detect_stress_from_face(image):
    # Stub for the demo: always reports "high" stress.
    return "high"
def simplify_task(task, stress_level):
    if stress_level == "low":
        return task  # No simplification needed if stress is low
    input_text = "simplify: " + task
    inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=512, truncation=True)
    outputs = model.generate(inputs, max_length=60, num_beams=4, early_stopping=True)
    simplified_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return simplified_text
def assistant(voice, face_image, task):
    # Step 1: Detect stress from voice and face image
    voice_stress = detect_stress_from_voice(voice)  # gr.Audio(type="filepath") passes a path string
    face_stress = detect_stress_from_face(face_image)
    # Combine the two signals: treat stress as high if either signal is high
    stress_level = "high" if (voice_stress == "high" or face_stress == "high") else "low"
    # Step 2: Simplify the task based on stress level
    simplified = simplify_task(task, stress_level)
    # Return stress level and simplified task
    return f"Detected Stress Level: {stress_level.capitalize()}", simplified
with gr.Blocks() as demo:
    gr.Markdown("# Context-Aware Multimodal Assistant")
    gr.Markdown("Upload your voice recording and face image, then type your task below.")
    voice_input = gr.Audio(label="Upload your voice recording (.wav)", type="filepath")
    face_input = gr.Image(label="Upload your face image")
    task_input = gr.Textbox(label="📝 What are you trying to do or say?", placeholder="e.g. I need help writing a message to my manager.")
    output_stress = gr.Textbox(label="🧠 Stress Level Detected", interactive=False)
    output_simplified = gr.Textbox(label="💬 Simplified Task / Message", interactive=False)
    submit_btn = gr.Button("Simplify Task")
    submit_btn.click(
        fn=assistant,
        inputs=[voice_input, face_input, task_input],
        outputs=[output_stress, output_simplified]
    )
if __name__ == "__main__":
    demo.launch()