| """ | |
| Secure AI Agents Suite - HuggingFace Spaces Application | |
| Autonomous AI agents with planning, reasoning, and execution capabilities | |
| """ | |
import os
import sys
import json
import logging
import asyncio
import traceback
import time
import psutil
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Tuple, Union
import gc
import warnings
# HuggingFace Spaces support
try:
    import spaces
    SPACES_AVAILABLE = True
except ImportError:
    SPACES_AVAILABLE = False
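
# The `spaces` package is only present on HuggingFace ZeroGPU hardware, where it
# provides the `spaces.GPU` decorator used near the end of this file; when it is
# missing (e.g. local development) the app simply runs without GPU scheduling.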
# Suppress warnings for cleaner output
warnings.filterwarnings("ignore")

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler('/tmp/spaces.log', mode='a')
    ]
)
logger = logging.getLogger(__name__)
# Import our autonomous engine
try:
    from autonomous_engine import (
        RefactoredAutonomousAgent,
        ValidationError,
        SecurityError,
        ExecutionError,
        TaskStatus,
        Priority
    )
    logger.info("Successfully imported autonomous engine components")
except ImportError as e:
    logger.error(f"Failed to import autonomous engine: {e}")
    # Fallback for development: add this file's directory to the path and retry
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))
    from autonomous_engine import (
        RefactoredAutonomousAgent,
        ValidationError,
        SecurityError,
        ExecutionError,
        TaskStatus,
        Priority
    )
try:
    import gradio as gr
    from gradio.themes import Soft, Glass  # Removed 'ocean' theme for compatibility
    import pandas as pd
    import plotly.express as px
    import plotly.graph_objects as go
    from plotly.subplots import make_subplots
    import numpy as np
except ImportError as e:
    logger.error(f"Failed to import required libraries: {e}")
    # Create fallback for missing dependencies
    class MockGradio:
        def __init__(self):
            self.Interface = self._mock_interface
            self.Textbox = self._mock_textbox
            self.Button = self._mock_button
            self.Dropdown = self._mock_dropdown
            self.Slider = self._mock_slider
            self.Checkbox = self._mock_checkbox
            self.Row = self._mock_row
            self.Column = self._mock_column
            self.HTML = self._mock_html
            self.JSON = self._mock_json
            self.File = self._mock_file
            self.Image = self._mock_image
            self.Audio = self._mock_audio
            self.Video = self._mock_video
            self.Plot = self._mock_plot
            self.DataFrame = self._mock_dataframe
            self.Markdown = self._mock_markdown
            self.Accordion = self._mock_accordion
            self.Tabs = self._mock_tabs
            self.Tab = self._mock_tab_item  # gr.Tab is used in the interface below
            self.TabItem = self._mock_tab_item
            self.Blocks = self._mock_blocks
            # Add themes attribute for compatibility
            self.themes = self._mock_themes()
        def _mock_themes(self):
            class MockThemes:
                def GoogleFont(self, font_name):
                    return font_name
            return MockThemes()

        def _mock_blocks(self, *args, **kwargs):
            class MockBlocksContext:
                def __enter__(self):
                    return self

                def __exit__(self, *args):
                    pass

                def launch(self, *args, **kwargs):
                    # Mock launch method for compatibility
                    pass
            return MockBlocksContext()

        def _mock_interface(self, fn, inputs, outputs, title=None, description=None, theme=None):
            return None

        def _mock_textbox(self, label=None, placeholder=None, lines=None, max_lines=None, type="text"):
            return None

        def _mock_button(self, label=None, variant="primary"):
            return None

        def _mock_dropdown(self, choices=None, label=None, value=None):
            return None

        def _mock_slider(self, minimum=None, maximum=None, value=None, step=None, label=None):
            return None

        def _mock_checkbox(self, label=None, value=False):
            return None

        def _mock_row(self, *args, **kwargs):
            return None

        def _mock_column(self, *args, **kwargs):
            return None

        def _mock_html(self, value=None):
            return None

        def _mock_json(self, value=None):
            return None

        def _mock_file(self, label=None, file_count="single", file_types=None):
            return None

        def _mock_image(self, label=None, type="filepath"):
            return None

        def _mock_audio(self, label=None, type="filepath"):
            return None

        def _mock_video(self, label=None, type="filepath"):
            return None

        def _mock_plot(self, figure=None):
            return None

        def _mock_dataframe(self, value=None, headers=None):
            return None

        def _mock_markdown(self, value=None):
            return None

        def _mock_accordion(self, label=None, open=True):
            return None

        def _mock_tabs(self):
            return None

        def _mock_tab_item(self, label=None):
            return None

    gr = MockGradio()
    logger.warning("Using mock Gradio components due to import failure")


class SpacesConfig:
    """Configuration management for HuggingFace Spaces deployment."""

    def __init__(self):
        self.config = {
            "app_name": "Secure AI Agents Suite",
            "version": "2.0.0",
            "author": "AI Engineering Team",
            "description": "Autonomous AI agents with advanced planning, reasoning, and execution capabilities",
            "max_input_length": 10000,
            "max_execution_time": 300,  # 5 minutes
            "memory_limit": "2GB",
            "enable_caching": True,
            "enable_logging": True,
            "debug_mode": os.getenv("DEBUG", "false").lower() == "true",
            "api_rate_limit": 100,  # requests per minute
            "enable_security": True,
            "enable_performance_monitoring": True
        }

        # Load environment variables
        self.config.update({
            "max_concurrent_requests": int(os.getenv("MAX_CONCURRENT_REQUESTS", "5")),
            "cache_ttl": int(os.getenv("CACHE_TTL", "3600")),  # 1 hour
            "enable_gpu": os.getenv("ENABLE_GPU", "false").lower() == "true",
            "model_cache_size": int(os.getenv("MODEL_CACHE_SIZE", "1000")),
            "log_level": os.getenv("LOG_LEVEL", "INFO"),
            "metrics_retention_days": int(os.getenv("METRICS_RETENTION_DAYS", "7"))
        })
        logger.info(f"Configuration loaded: {len(self.config)} parameters")


class AgentManager:
    """Manages agent instances and resources for the Spaces deployment."""

    def __init__(self, config: SpacesConfig):
        self.config = config
        self.agents = {}
        self.agent_stats = {}
        self._performance_metrics = {
            "total_requests": 0,
            "successful_requests": 0,
            "failed_requests": 0,
            "average_response_time": 0.0,
            "memory_usage": [],
            "cpu_usage": [],
            "timestamps": []
        }
        # Don't initialize agents during construction to avoid pickling issues;
        # agents will be created lazily when first requested.
        logger.info("Agent manager initialized (agents will be created lazily)")

    def __getstate__(self):
        """Custom pickling to handle non-serializable objects."""
        state = self.__dict__.copy()
        # Remove any non-serializable objects from the performance metrics
        if '_performance_metrics' in state:
            # Create a copy without thread-locked objects
            safe_metrics = state['_performance_metrics'].copy()
            # Ensure all values are serializable
            for key, value in safe_metrics.items():
                if hasattr(value, '__dict__') and not isinstance(value, (list, dict, str, int, float, bool)):
                    safe_metrics[key] = str(value)
            state['_performance_metrics'] = safe_metrics
        return state

    def __setstate__(self, state):
        """Custom unpickling to restore object state."""
        self.__dict__.update(state)
        # Ensure performance metrics are properly initialized
        if '_performance_metrics' not in self.__dict__:
            self._performance_metrics = {
                "total_requests": 0,
                "successful_requests": 0,
                "failed_requests": 0,
                "average_response_time": 0.0,
                "memory_usage": [],
                "cpu_usage": [],
                "timestamps": []
            }

    def _initialize_default_agents(self):
        """Initialize default agent instances lazily."""
        if self.agents:  # Already initialized
            return
        try:
            # Initialize different agent types for different use cases
            agent_types = [
                "EnterpriseAgent",
                "ConsumerAgent",
                "CreativeAgent",
                "VoiceAgent",
                "GeneralAgent"
            ]
            for agent_type in agent_types:
                try:
                    agent = RefactoredAutonomousAgent(agent_type)
                    self.agents[agent_type] = agent
                    self.agent_stats[agent_type] = {
                        "requests_handled": 0,
                        "success_rate": 0.0,
                        "average_response_time": 0.0,
                        "last_used": None
                    }
                    logger.info(f"Initialized {agent_type} agent")
                except Exception as e:
                    logger.error(f"Failed to initialize {agent_type} agent: {e}")

            # Add a general-purpose agent as fallback
            if "GeneralAgent" not in self.agents:
                self.agents["GeneralAgent"] = RefactoredAutonomousAgent("GeneralAgent")
                self.agent_stats["GeneralAgent"] = {
                    "requests_handled": 0,
                    "success_rate": 0.0,
                    "average_response_time": 0.0,
                    "last_used": None
                }
                logger.info("Initialized fallback GeneralAgent")
        except Exception as e:
            logger.error(f"Failed to initialize agents: {e}")
            # Create a minimal fallback agent
            self.agents["FallbackAgent"] = RefactoredAutonomousAgent("FallbackAgent")
            self.agent_stats["FallbackAgent"] = {
                "requests_handled": 0,
                "success_rate": 0.0,
                "average_response_time": 0.0,
                "last_used": None
            }

    def get_agent(self, agent_type: str = "GeneralAgent") -> Optional[RefactoredAutonomousAgent]:
        """Get an agent instance by type, creating agents lazily."""
        # Initialize agents on first request
        if not self.agents:
            self._initialize_default_agents()
        # Return requested agent or fallback
        return self.agents.get(agent_type, self.agents.get("GeneralAgent"))

    def update_performance_metrics(self, agent_type: str, response_time: float, success: bool):
        """Update performance metrics for monitoring."""
        self._performance_metrics["total_requests"] += 1
        if success:
            self._performance_metrics["successful_requests"] += 1
        else:
            self._performance_metrics["failed_requests"] += 1

        # Update agent-specific stats
        if agent_type in self.agent_stats:
            stats = self.agent_stats[agent_type]
            stats["requests_handled"] += 1
            stats["last_used"] = datetime.utcnow()
            # Update success rate
            stats["success_rate"] = (
                (stats["success_rate"] * (stats["requests_handled"] - 1) + (1 if success else 0))
                / stats["requests_handled"]
            )
            # Update average response time
            stats["average_response_time"] = (
                (stats["average_response_time"] * (stats["requests_handled"] - 1) + response_time)
                / stats["requests_handled"]
            )

        # Update global metrics
        self._performance_metrics["average_response_time"] = (
            (self._performance_metrics["average_response_time"] * (self._performance_metrics["total_requests"] - 1) + response_time)
            / self._performance_metrics["total_requests"]
        )

        # Add system metrics
        self._performance_metrics["memory_usage"].append(psutil.virtual_memory().percent)
        self._performance_metrics["cpu_usage"].append(psutil.cpu_percent())
        self._performance_metrics["timestamps"].append(datetime.utcnow().isoformat())

        # Keep only recent metrics
        max_metrics = 1000
        for key in ["memory_usage", "cpu_usage", "timestamps"]:
            if len(self._performance_metrics[key]) > max_metrics:
                self._performance_metrics[key] = self._performance_metrics[key][-max_metrics:]
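
    # Note: the rolling averages above use the standard incremental-mean update,
    #   new_mean = (old_mean * (n - 1) + x) / n,
    # so no per-request history needs to be stored for response times.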

    def get_performance_summary(self) -> Dict[str, Any]:
        """Get performance summary for monitoring."""
        return {
            "total_requests": self._performance_metrics["total_requests"],
            "success_rate": (
                self._performance_metrics["successful_requests"] / max(1, self._performance_metrics["total_requests"])
            ),
            "average_response_time": self._performance_metrics["average_response_time"],
            "current_memory_usage": psutil.virtual_memory().percent,
            "current_cpu_usage": psutil.cpu_percent(),
            "agent_stats": self.agent_stats,
            "system_health": self._get_system_health()
        }

    def _get_system_health(self) -> str:
        """Assess overall system health."""
        memory_usage = psutil.virtual_memory().percent
        cpu_usage = psutil.cpu_percent()
        if memory_usage > 90 or cpu_usage > 90:
            return "critical"
        elif memory_usage > 75 or cpu_usage > 75:
            return "warning"
        else:
            return "healthy"


class RequestValidator:
    """Validates and sanitizes incoming requests."""

    @staticmethod
    def validate_input(user_input: str, max_length: int = 10000) -> Tuple[bool, str]:
        """Validate user input for safety and length."""
        if not user_input or not isinstance(user_input, str):
            return False, "Input cannot be empty"
        if len(user_input.strip()) == 0:
            return False, "Input cannot be empty or whitespace only"
        if len(user_input) > max_length:
            return False, f"Input too long. Maximum {max_length} characters allowed"

        # Check for potentially dangerous content
        dangerous_patterns = [
            r'<script.*?>.*?</script>',
            r'javascript:',
            r'on\w+\s*=',
            r'eval\s*\(',
            r'exec\s*\(',
            r'__import__',
            r'subprocess',
            r'os\.system',
            r'shell=True'
        ]
        import re
        for pattern in dangerous_patterns:
            if re.search(pattern, user_input, re.IGNORECASE):
                return False, f"Potentially dangerous content detected: {pattern}"
        return True, "Input is valid"

    @staticmethod
    def validate_context(context: Dict[str, Any]) -> Tuple[bool, str]:
        """Validate context parameters."""
        if not isinstance(context, dict):
            return False, "Context must be a dictionary"
        # Check for reasonable context size
        context_size = len(json.dumps(context))
        if context_size > 100000:  # 100KB limit
            return False, "Context too large"
        return True, "Context is valid"


class SpacesApp:
    """Main application class for HuggingFace Spaces deployment."""

    def __init__(self):
        self.config = SpacesConfig()
        self.agent_manager = AgentManager(self.config)
        self._validator = None
        self.start_time = datetime.utcnow()
        # Set up logging level
        logging.getLogger().setLevel(getattr(logging, self.config.config["log_level"]))
        logger.info("Spaces application initialized successfully")

    def __getstate__(self):
        """Custom pickling to handle non-serializable objects."""
        state = self.__dict__.copy()
        # Drop the validator; it will be reinitialized lazily after unpickling
        state['_validator'] = None
        return state

    def __setstate__(self, state):
        """Custom unpickling to restore object state."""
        self.__dict__.update(state)
        # Reinitialize validator on demand
        self._validator = None

    @property
    def validator(self):
        """Lazy initialization of RequestValidator."""
        if self._validator is None:
            self._validator = RequestValidator()
        return self._validator

    def create_interface(self):
        """Create the main Gradio interface."""
        try:
            # Create theme
            theme = Soft(
                primary_hue="blue",
                secondary_hue="gray",
                neutral_hue="slate",
                font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"]
            )
            with gr.Blocks(
                title="Secure AI Agents Suite",
                theme=theme,
                analytics_enabled=False
            ) as interface:
                # Header section
                with gr.Row():
                    with gr.Column(scale=3):
                        gr.HTML("""
                        <div style="text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 15px; color: white; margin-bottom: 20px;">
                            <h1 style="margin: 0; font-size: 2.5em; font-weight: 700;">🤖 Secure AI Agents Suite</h1>
                            <p style="margin: 10px 0 0 0; font-size: 1.2em; opacity: 0.9;">Autonomous AI agents with advanced planning, reasoning, and execution capabilities</p>
                        </div>
                        """)

                # Agent selection and input section
                with gr.Row():
                    with gr.Column(scale=2):
                        gr.HTML("<h3>🎯 Select Agent Type</h3>")
                        agent_type = gr.Dropdown(
                            choices=["EnterpriseAgent", "ConsumerAgent", "CreativeAgent", "VoiceAgent", "GeneralAgent"],
                            value="GeneralAgent",
                            label="Choose Agent Type"
                        )
                    with gr.Column(scale=3):
                        gr.HTML("<h3>📝 Your Request</h3>")
                        user_input = gr.Textbox(
                            label="Describe what you need",
                            placeholder="Enter your request here... (e.g., 'Create a comprehensive marketing plan for our new product launch')",
                            lines=4,
                            max_lines=10
                        )

                # Context input (optional)
                with gr.Accordion("🔧 Advanced Settings", open=False):
                    with gr.Row():
                        with gr.Column():
                            context_input = gr.JSON(
                                label="Context Data (Optional)",
                                value={}
                            )
                        with gr.Column():
                            max_execution_time = gr.Slider(
                                minimum=30,
                                maximum=300,
                                value=120,
                                step=30,
                                label="Max Execution Time (seconds)"
                            )

                # Action buttons
                with gr.Row():
                    with gr.Column():
                        submit_btn = gr.Button(
                            "🚀 Execute Request",
                            variant="primary",
                            size="lg"
                        )
                    with gr.Column():
                        clear_btn = gr.Button(
                            "🗑️ Clear",
                            variant="secondary",
                            size="lg"
                        )

                # Progress indicator
                progress_bar = gr.HTML("""
                <div id="progress-container" style="display: none; margin: 20px 0;">
                    <div style="background-color: #f3f4f6; border-radius: 10px; overflow: hidden;">
                        <div id="progress-bar" style="background: linear-gradient(90deg, #3b82f6, #1d4ed8); height: 20px; width: 0%; transition: width 0.3s ease; border-radius: 10px;"></div>
                    </div>
                    <p id="progress-text" style="text-align: center; margin: 10px 0; font-weight: 500;">Processing...</p>
                </div>
                """)

                # Results section
                with gr.Tab("📊 Results"):
                    with gr.Row():
                        with gr.Column(scale=2):
                            # Main results
                            results_output = gr.JSON(
                                label="Execution Results"
                            )
                            # Summary
                            summary_output = gr.Markdown(
                                label="Summary",
                                value="Results will appear here after execution"
                            )
                        with gr.Column(scale=1):
                            # Performance metrics
                            gr.HTML("<h3>⚡ Performance</h3>")
                            performance_output = gr.JSON(
                                label="Performance Metrics"
                            )
                            # System status
                            system_status = gr.JSON(
                                label="System Status"
                            )

                # Analytics tab
                with gr.Tab("📈 Analytics"):
                    with gr.Row():
                        with gr.Column():
                            gr.HTML("<h3>📊 Performance Analytics</h3>")
                            metrics_plot = gr.Plot(
                                label="Performance Metrics Over Time"
                            )
                            resource_plot = gr.Plot(
                                label="Resource Usage"
                            )
                        with gr.Column():
                            gr.HTML("<h3>🎯 Agent Statistics</h3>")
                            agent_stats_table = gr.DataFrame(
                                label="Agent Performance",
                                headers=["Agent Type", "Requests Handled", "Success Rate", "Avg Response Time", "Last Used"]
                            )

                # Event handlers
                submit_btn.click(
                    fn=self.process_request_with_progress,
                    inputs=[agent_type, user_input, context_input, max_execution_time],
                    outputs=[results_output, summary_output, performance_output, system_status, metrics_plot, resource_plot, agent_stats_table],
                    show_progress="full"
                )
                clear_btn.click(
                    fn=self.clear_interface,
                    inputs=[],
                    outputs=[user_input, context_input, results_output, summary_output, performance_output, system_status]
                )

                # Load initial analytics
                interface.load(
                    fn=self.load_initial_analytics,
                    inputs=[],
                    outputs=[metrics_plot, resource_plot, agent_stats_table, system_status]
                )

            logger.info("Interface created successfully")
            return interface
        except Exception as e:
            logger.error(f"Failed to create interface: {e}")
            logger.error(traceback.format_exc())
            # Return a minimal fallback interface
            with gr.Blocks() as fallback_interface:
                gr.HTML("<h1>⚠️ Application Error</h1>")
                gr.HTML(f"<p>Failed to initialize application: {e}</p>")
                gr.HTML("<p>Please check the logs and try again later.</p>")
            return fallback_interface

    async def process_request_with_progress(self, agent_type: str, user_input: str, context: Dict[str, Any], max_execution_time: int):
        """Process a request, yielding progress updates as they become available."""
        try:
            # Update progress
            yield (
                None,  # results
                "🔍 **Analyzing Request**\n\nValidating input and analyzing requirements...",  # summary
                {"status": "processing", "message": "Starting analysis"},  # performance
                {"status": "loading", "message": "Initializing system"},  # system
                None,  # metrics plot
                None,  # resource plot
                None   # agent stats
            )

            # Validate input
            is_valid, validation_message = self.validator.validate_input(user_input)
            if not is_valid:
                yield (
                    {"error": validation_message},
                    f"❌ **Validation Failed**\n\n{validation_message}",
                    {"status": "error", "message": validation_message},
                    {"status": "error", "message": "Validation failed"},
                    None, None, None
                )
                return

            # Validate context
            is_valid_context, context_message = self.validator.validate_context(context)
            if not is_valid_context:
                yield (
                    {"error": context_message},
                    f"❌ **Context Validation Failed**\n\n{context_message}",
                    {"status": "error", "message": context_message},
                    {"status": "error", "message": "Context validation failed"},
                    None, None, None
                )
                return

            # Get agent
            agent = self.agent_manager.get_agent(agent_type)
            if not agent:
                yield (
                    {"error": f"Agent type '{agent_type}' not available"},
                    f"❌ **Agent Unavailable**\n\nAgent type '{agent_type}' is not available. Using fallback agent.",
                    {"status": "error", "message": "Agent not available"},
                    {"status": "warning", "message": "Using fallback agent"},
                    None, None, None
                )
                agent = self.agent_manager.get_agent("GeneralAgent")

            # Process request
            start_time = time.time()
            yield (
                None,  # results
                f"🧠 **Processing with {agent_type}**\n\nAnalyzing situation and creating execution plan...",  # summary
                {"status": "analyzing", "message": "Running analysis and planning"},  # performance
                {"status": "processing", "message": "Executing autonomous workflow"},  # system
                None,  # metrics plot
                None,  # resource plot
                None   # agent stats
            )

            # Execute the request with a timeout
            try:
                result = await asyncio.wait_for(
                    agent.process_request(user_input, context),
                    timeout=max_execution_time
                )
                response_time = time.time() - start_time

                # Update performance metrics
                self.agent_manager.update_performance_metrics(
                    agent_type, response_time, result.get("overall_success", False)
                )

                # Generate summary
                summary = self.generate_summary(result)
                # Get performance data
                performance_data = self.agent_manager.get_performance_summary()
                # Get analytics
                analytics_data = self.get_analytics_data()

                yield (
                    result,  # results
                    summary,  # summary
                    performance_data,  # performance
                    {"status": "healthy", "message": "Request completed successfully"},  # system
                    analytics_data["metrics_plot"],  # metrics plot
                    analytics_data["resource_plot"],  # resource plot
                    analytics_data["agent_stats_table"]  # agent stats
                )
            except asyncio.TimeoutError:
                yield (
                    {"error": f"Request timed out after {max_execution_time} seconds"},
                    f"⏰ **Timeout**\n\nRequest execution exceeded {max_execution_time} seconds. Please try a simpler request or increase the timeout.",
                    {"status": "timeout", "message": f"Execution timeout after {max_execution_time}s"},
                    {"status": "warning", "message": "Request timed out"},
                    None, None, None
                )
        except Exception as e:
            logger.error(f"Request processing failed: {e}")
            logger.error(traceback.format_exc())
            yield (
                {"error": str(e)},
                f"❌ **Processing Error**\n\nAn unexpected error occurred: {e}",
                {"status": "error", "message": f"Processing failed: {e}"},
                {"status": "error", "message": "System error occurred"},
                None, None, None
            )
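
    # Gradio treats (async) generator event handlers as streaming callbacks: each
    # `yield` above pushes a partial update to every output component bound in
    # submit_btn.click(), which is how the progress messages reach the UI.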

    def generate_summary(self, result: Dict[str, Any]) -> str:
        """Generate a formatted summary from results."""
        try:
            if not result.get("overall_success", False):
                return f"❌ **Execution Failed**\n\n{result.get('error', 'Unknown error occurred')}"

            # Extract key information
            analysis = result.get("analysis", {})
            plan = result.get("plan", {})
            execution = result.get("execution", {})

            # Create summary
            summary = f"""✅ **Execution Completed Successfully**

🧠 **Analysis**: {analysis.get('intent', {}).get('primary', 'general')} intent detected
📊 **Complexity**: {analysis.get('complexity', {}).get('level', 'medium')} ({analysis.get('complexity', {}).get('score', 0)}/10)
📋 **Planning**: {len(plan.get('tasks', []))} tasks planned
⚡ **Execution**: {execution.get('completed_tasks', 0)} tasks completed ({execution.get('success_rate', 0):.0%} success rate)

**Key Insights:**
• Success probability: {analysis.get('success_probability', 0):.0%}
• Processing time: {execution.get('execution_time_minutes', 0):.1f} minutes
• Adaptations made: {execution.get('adaptations_made', 0)}
• Autonomous decisions: {execution.get('decisions_made', 0)}

{result.get('summary', 'No additional summary available')}"""
            return summary
        except Exception as e:
            logger.error(f"Failed to generate summary: {e}")
            return f"✅ **Execution Completed**\n\nResults processed successfully. (Summary generation failed: {e})"

    def clear_interface(self):
        """Clear all interface inputs and outputs."""
        return (
            "",   # user_input
            {},   # context
            {},   # results
            "Results cleared. Ready for new request.",  # summary
            {},   # performance
            {}    # system
        )

    def load_initial_analytics(self):
        """Load initial analytics data."""
        try:
            analytics_data = self.get_analytics_data()
            system_status = self.agent_manager.get_performance_summary()
            return (
                analytics_data["metrics_plot"],
                analytics_data["resource_plot"],
                analytics_data["agent_stats_table"],
                system_status
            )
        except Exception as e:
            logger.error(f"Failed to load initial analytics: {e}")
            return (None, None, None, {"status": "error", "message": str(e)})

    def get_analytics_data(self):
        """Get analytics data for plots and tables."""
        try:
            # Get performance metrics
            metrics = self.agent_manager._performance_metrics

            # Create metrics plot
            if len(metrics["timestamps"]) > 1:
                fig_metrics = go.Figure()
                fig_metrics.add_trace(go.Scatter(
                    x=metrics["timestamps"],
                    y=[1 if i < metrics["successful_requests"] else 0 for i in range(metrics["total_requests"])],
                    mode='lines+markers',
                    name='Success Rate',
                    line=dict(color='#10b981', width=3)
                ))
                fig_metrics.add_trace(go.Scatter(
                    x=metrics["timestamps"],
                    y=metrics["memory_usage"],
                    mode='lines+markers',
                    name='Memory Usage (%)',
                    yaxis='y2',
                    line=dict(color='#3b82f6', width=2)
                ))
                fig_metrics.update_layout(
                    title="Performance Metrics Over Time",
                    xaxis_title="Time",
                    yaxis_title="Success Rate",
                    yaxis2=dict(
                        title="Memory Usage (%)",
                        overlaying="y",
                        side="right"
                    ),
                    hovermode="x unified",
                    template="plotly_white"
                )
            else:
                fig_metrics = go.Figure().add_annotation(
                    text="Insufficient data for performance plot",
                    xref="paper", yref="paper",
                    x=0.5, y=0.5, showarrow=False,
                    font=dict(size=16)
                )

            # Create resource usage plot
            if len(metrics["timestamps"]) > 1:
                fig_resources = make_subplots(
                    rows=2, cols=1,
                    subplot_titles=("CPU Usage (%)", "Memory Usage (%)"),
                    vertical_spacing=0.1
                )
                fig_resources.add_trace(
                    go.Scatter(
                        x=metrics["timestamps"],
                        y=metrics["cpu_usage"],
                        mode='lines+markers',
                        name='CPU Usage',
                        line=dict(color='#f59e0b', width=2)
                    ),
                    row=1, col=1
                )
                fig_resources.add_trace(
                    go.Scatter(
                        x=metrics["timestamps"],
                        y=metrics["memory_usage"],
                        mode='lines+markers',
                        name='Memory Usage',
                        line=dict(color='#8b5cf6', width=2)
                    ),
                    row=2, col=1
                )
                fig_resources.update_layout(
                    title="System Resource Usage",
                    template="plotly_white",
                    height=400
                )
            else:
                fig_resources = go.Figure().add_annotation(
                    text="Insufficient data for resource plot",
                    xref="paper", yref="paper",
                    x=0.5, y=0.5, showarrow=False,
                    font=dict(size=16)
                )

            # Create agent stats table
            agent_stats = self.agent_manager.agent_stats
            stats_data = []
            for agent_type, stats in agent_stats.items():
                stats_data.append([
                    agent_type,
                    stats["requests_handled"],
                    f"{stats['success_rate']:.1%}",
                    f"{stats['average_response_time']:.2f}s",
                    stats["last_used"].strftime("%Y-%m-%d %H:%M") if stats["last_used"] else "Never"
                ])

            return {
                "metrics_plot": fig_metrics,
                "resource_plot": fig_resources,
                "agent_stats_table": stats_data
            }
        except Exception as e:
            logger.error(f"Failed to generate analytics: {e}")
            return {
                "metrics_plot": None,
                "resource_plot": None,
                "agent_stats_table": []
            }


def create_app():
    """Create and return the Gradio application."""
    try:
        app = SpacesApp()
        interface = app.create_interface()
        logger.info("Application created successfully")
        return interface
    except Exception as e:
        logger.error(f"Failed to create application: {e}")
        logger.error(traceback.format_exc())
        # Return minimal error interface
        with gr.Blocks() as error_interface:
            gr.HTML(f"""
            <div style="text-align: center; padding: 50px; background: #fee2e2; border: 2px solid #fecaca; border-radius: 10px; margin: 20px;">
                <h1 style="color: #dc2626; margin: 0;">⚠️ Application Error</h1>
                <p style="color: #7f1d1d; font-size: 1.2em; margin: 20px 0;">Failed to initialize the Secure AI Agents Suite</p>
                <p style="color: #7f1d1d;">Error: {e}</p>
                <p style="color: #7f1d1d;">Please check the logs and try again later.</p>
            </div>
            """)
        return error_interface


# Health check endpoint for monitoring
def health_check():
    """Health check endpoint for Spaces monitoring."""
    try:
        return {
            "status": "healthy",
            "timestamp": datetime.utcnow().isoformat(),
            "version": "2.0.0",
            "uptime_seconds": (datetime.utcnow() - datetime.utcnow()).total_seconds(),  # Placeholder: always 0; real uptime needs a module-level start time
            "memory_usage": psutil.virtual_memory().percent,
            "cpu_usage": psutil.cpu_percent()
        }
    except Exception as e:
        return {
            "status": "unhealthy",
            "error": str(e),
            "timestamp": datetime.utcnow().isoformat()
        }
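
# Example (assumed external monitoring script, not wired into Gradio itself):
#   status = health_check()
#   if status["status"] != "healthy":
#       notify_on_call(status)  # hypothetical alerting hook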


# Apply HuggingFace Spaces GPU decorator if available
if SPACES_AVAILABLE:
    # Only apply the GPU decorator if GPU use is explicitly enabled
    if os.getenv("ENABLE_GPU", "false").lower() == "true":
        create_app = spaces.GPU(create_app)


# HuggingFace Spaces application entry point for deployment detection
def spaces_app():
    """HuggingFace Spaces application function."""
    return create_app()


if __name__ == "__main__":
    # Create and launch the application
    try:
        interface = create_app()

        # Configure server settings for Spaces
        server_name = os.getenv("HOST", "0.0.0.0")
        server_port = int(os.getenv("GRADIO_SERVER_PORT", os.getenv("PORT", "7860")))
        logger.info(f"Starting application on {server_name}:{server_port}")

        interface.launch(
            server_name=server_name,
            server_port=server_port,
            share=False,  # Don't create a public share link in Spaces
            show_error=True,
            quiet=False,
            height=800
        )
    except Exception as e:
        logger.error(f"Failed to start application: {e}")
        logger.error(traceback.format_exc())
        sys.exit(1)