import gradio as gr

from src.models import ModelManager, AudioProcessor, Analyzer
from src.utils import visualizer, GPUOptimizer, ModelCache

# Initialize components
optimizer = GPUOptimizer()
optimizer.optimize()

model_manager = ModelManager()
audio_processor = AudioProcessor()
analyzer = Analyzer(model_manager, audio_processor)
cache = ModelCache()


def process_audio(audio_file):
    try:
        # Check cache
        with open(audio_file, 'rb') as f:
            cache_key = cache.get_cache_key(f.read())
        cached_result = cache.cache_result(cache_key, None)
        if cached_result:
            return cached_result

        # Process audio
        results = analyzer.analyze(audio_file)

        # Format outputs
        outputs = (
            results['transcription'],
            visualizer.create_emotion_plot(results['emotions']['scores']),
            _format_indicators(results['mental_health_indicators'])
        )

        # Cache results
        cache.cache_result(cache_key, outputs)
        return outputs

    except Exception as e:
        return str(e), "Error in analysis", "Error in analysis"


def _format_indicators(indicators):
    return f"""
### Mental Health Indicators
- Depression Risk: {indicators['depression_risk']:.2f}
- Anxiety Risk: {indicators['anxiety_risk']:.2f}
- Stress Level: {indicators['stress_level']:.2f}
"""


interface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(source="microphone", type="filepath"),
    outputs=[
        gr.Textbox(label="Transcription"),
        gr.HTML(label="Emotion Analysis"),
        gr.Markdown(label="Mental Health Indicators")
    ],
    title="Vocal Biomarker Analysis",
    description="Analyze voice for emotional and mental health indicators"
)

interface.launch()