"""
AI Historical Simulation Platform - Performance Monitor

This module provides comprehensive real-time performance monitoring with <200ms
response time tracking, memory usage monitoring, CPU utilization tracking,
and alert systems for performance degradation.
"""

import asyncio
import json
import logging
import threading
import time
from collections import defaultdict, deque
from contextlib import asynccontextmanager
from dataclasses import asdict, dataclass
from datetime import datetime, timedelta
from pathlib import Path
from statistics import mean, median
from typing import Any, Callable, Deque, Dict, List, Optional

import psutil

logger = logging.getLogger(__name__)


@dataclass
class PerformanceMetric:
    """Individual performance metric data point.

    One sample of a named metric, stamped with its collection time and
    optional free-form context.
    """
    name: str
    value: float
    timestamp: datetime
    # Optional is the accurate annotation: the default really is None,
    # not a dict (to_dict substitutes {} on serialization).
    metadata: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to a JSON-serializable dictionary (timestamp as ISO-8601)."""
        return {
            'name': self.name,
            'value': self.value,
            'timestamp': self.timestamp.isoformat(),
            'metadata': self.metadata or {}
        }


@dataclass
class AlertRule:
    """Performance alert rule definition.

    A rule fires when the watched metric breaches ``threshold`` (per
    ``comparison``) continuously for at least ``duration_seconds``.
    """
    name: str  # Unique rule identifier (also the key in AlertManager.rules).
    metric_name: str  # Name of the metric this rule watches.
    threshold: float  # Boundary value compared against the metric.
    comparison: str  # 'gt', 'lt', 'eq', 'gte' or 'lte' (see AlertManager._evaluate_threshold)
    duration_seconds: int  # Breach must persist this long before the alert activates.
    # Invoked as callback(rule_name, current_value, alert_dict) when the alert fires.
    callback: Optional[Callable[[str, float, Dict[str, Any]], None]] = None
    enabled: bool = True  # Disabled rules are skipped by AlertManager.check_rules.


@dataclass
class SystemSnapshot:
    """Point-in-time snapshot of host and process resource usage."""
    timestamp: datetime
    cpu_percent: float
    memory_percent: float
    memory_mb: float
    disk_usage_percent: float
    network_io: Dict[str, int]
    process_count: int
    thread_count: int

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-serializable dict (timestamp rendered as ISO-8601)."""
        data = asdict(self)
        # asdict leaves the datetime object untouched; serialize it explicitly.
        data['timestamp'] = self.timestamp.isoformat()
        return data


class MetricsAggregator:
    """Collects timestamped metric samples and computes summaries over them."""

    def __init__(self, max_history: int = 10000):
        """
        Initialize metrics aggregator.

        Args:
            max_history: Maximum number of samples retained per metric name
        """
        self.max_history = max_history
        # Per-metric ring buffers: the oldest samples fall off automatically.
        self.metrics: Dict[str, Deque[PerformanceMetric]] = defaultdict(lambda: deque(maxlen=max_history))
        self._lock = threading.Lock()

    def add_metric(self, name: str, value: float, metadata: Optional[Dict[str, Any]] = None) -> None:
        """Record one sample of *name*, stamped with the current time."""
        sample = PerformanceMetric(
            name=name,
            value=value,
            timestamp=datetime.now(),
            metadata=metadata or {},
        )
        with self._lock:
            self.metrics[name].append(sample)

    def get_metrics(self, name: str, minutes: int = 60) -> List[PerformanceMetric]:
        """Return all samples of *name* recorded within the last *minutes* minutes."""
        oldest_allowed = datetime.now() - timedelta(minutes=minutes)
        with self._lock:
            return [sample for sample in self.metrics[name] if sample.timestamp >= oldest_allowed]

    def get_statistics(self, name: str, minutes: int = 60) -> Dict[str, float]:
        """Summarize recent samples of *name* (count/min/max/mean/median/latest).

        Returns an empty dict when no samples fall inside the window.
        """
        window = self.get_metrics(name, minutes)
        if not window:
            return {}
        values = [sample.value for sample in window]
        return {
            'count': len(values),
            'min': min(values),
            'max': max(values),
            'mean': mean(values),
            'median': median(values),
            'latest': values[-1] if values else 0.0
        }

    def get_trend(self, name: str, minutes: int = 60) -> str:
        """Classify the metric's recent direction.

        Returns one of "increasing", "decreasing", "stable", or
        "insufficient_data" (fewer than two samples in the window).
        """
        window = self.get_metrics(name, minutes)
        if len(window) < 2:
            return "insufficient_data"

        # Split the window into older and newer halves and compare averages.
        pivot = len(window) // 2
        older = [sample.value for sample in window[:pivot]]
        newer = [sample.value for sample in window[pivot:]]

        if not older or not newer:
            return "insufficient_data"

        older_avg = mean(older)
        newer_avg = mean(newer)

        change_percent = ((newer_avg - older_avg) / older_avg) * 100 if older_avg != 0 else 0

        if abs(change_percent) < 5:  # movement under 5% counts as noise
            return "stable"
        if change_percent > 0:
            return "increasing"
        return "decreasing"

class AlertManager:
    """Manages performance alert rules, their state, and alert history.

    Each rule runs a small per-rule state machine inside check_rules:
    inactive -> pending (threshold breached, waiting out duration_seconds)
    -> active (alert emitted) -> resolved (threshold cleared). All state
    mutation is serialized with a lock.
    """

    def __init__(self):
        """Initialize alert manager."""
        # AlertRule annotations are quoted (lazy forward refs) so this class
        # has no import-time dependency on definition order.
        self.rules: Dict[str, "AlertRule"] = {}
        self.alert_states: Dict[str, Dict[str, Any]] = {}
        self.alert_history: List[Dict[str, Any]] = []
        self.max_history = 1000
        self._lock = threading.Lock()

    def add_rule(self, rule: "AlertRule") -> None:
        """Register a rule and initialize its tracking state."""
        with self._lock:
            self.rules[rule.name] = rule
            self.alert_states[rule.name] = {
                'active': False,
                'triggered_at': None,
                'last_value': None,
                'trigger_count': 0
            }

    def remove_rule(self, rule_name: str) -> None:
        """Remove a rule and its state (no-op if the name is unknown)."""
        with self._lock:
            self.rules.pop(rule_name, None)
            self.alert_states.pop(rule_name, None)

    def check_rules(self, metrics: Dict[str, float]) -> List[Dict[str, Any]]:
        """Check all enabled rules against the given metric values.

        Returns the list of newly triggered ('alert') and newly resolved
        ('resolved') alert dicts. A rule activates only once its threshold
        has been continuously breached for at least rule.duration_seconds.
        """
        triggered_alerts = []

        with self._lock:
            for rule_name, rule in self.rules.items():
                if not rule.enabled or rule.metric_name not in metrics:
                    continue

                current_value = metrics[rule.metric_name]
                state = self.alert_states[rule_name]

                # Check if threshold is breached
                threshold_breached = self._evaluate_threshold(
                    current_value, rule.threshold, rule.comparison
                )

                if threshold_breached:
                    # BUG FIX: only start the clock when tracking begins.
                    # Previously triggered_at was reset to "now" on every
                    # check while the alert was pending, so rules with
                    # duration_seconds > 0 could never activate.
                    if state['triggered_at'] is None:
                        state['triggered_at'] = datetime.now()
                        state['trigger_count'] = 1
                    else:
                        state['trigger_count'] += 1

                    state['last_value'] = current_value

                    # Activate once the breach has persisted long enough
                    if (state['triggered_at'] and
                        (datetime.now() - state['triggered_at']).total_seconds() >= rule.duration_seconds):

                        if not state['active']:
                            # Activate alert
                            state['active'] = True
                            alert = self._create_alert(rule, current_value)
                            triggered_alerts.append(alert)
                            self._append_history(alert)

                            # NOTE(review): the callback runs while the
                            # manager lock is held — callbacks must not call
                            # back into this manager or they will deadlock.
                            if rule.callback:
                                try:
                                    rule.callback(rule.name, current_value, alert)
                                except Exception as e:
                                    logger.error(f"Alert callback error: {e}")

                else:
                    # Threshold not breached: resolve an active alert and
                    # reset any pending state.
                    if state['active']:
                        state['active'] = False
                        deactivation_alert = {
                            'type': 'resolved',
                            'rule_name': rule_name,
                            'metric_name': rule.metric_name,
                            'resolved_at': datetime.now().isoformat(),
                            'last_value': current_value
                        }
                        triggered_alerts.append(deactivation_alert)
                        self._append_history(deactivation_alert)

                    state['triggered_at'] = None
                    state['trigger_count'] = 0
                    state['last_value'] = current_value

        return triggered_alerts

    def _append_history(self, entry: Dict[str, Any]) -> None:
        """Append to history, trimming to the newest max_history entries.

        (Trimming previously only happened for 'alert' entries, letting
        'resolved' entries grow the list without bound.)
        """
        self.alert_history.append(entry)
        if len(self.alert_history) > self.max_history:
            self.alert_history = self.alert_history[-self.max_history:]

    def _evaluate_threshold(self, value: float, threshold: float, comparison: str) -> bool:
        """Evaluate one comparison; unknown operators evaluate to False."""
        if comparison == 'gt':
            return value > threshold
        elif comparison == 'lt':
            return value < threshold
        elif comparison == 'eq':
            return abs(value - threshold) < 0.001  # Floating point comparison
        elif comparison == 'gte':
            return value >= threshold
        elif comparison == 'lte':
            return value <= threshold
        else:
            return False

    def _create_alert(self, rule: "AlertRule", current_value: float) -> Dict[str, Any]:
        """Build the alert dict emitted when a rule activates."""
        return {
            'type': 'alert',
            'rule_name': rule.name,
            'metric_name': rule.metric_name,
            'threshold': rule.threshold,
            'current_value': current_value,
            'comparison': rule.comparison,
            'triggered_at': datetime.now().isoformat(),
            'severity': self._calculate_severity(current_value, rule.threshold, rule.comparison)
        }

    def _calculate_severity(self, value: float, threshold: float, comparison: str) -> str:
        """Grade severity by the relative amount the threshold is exceeded."""
        if comparison in ['gt', 'gte']:
            excess = (value - threshold) / threshold if threshold != 0 else float('inf')
        elif comparison in ['lt', 'lte']:
            excess = (threshold - value) / threshold if threshold != 0 else float('inf')
        else:
            excess = abs(value - threshold) / threshold if threshold != 0 else 0

        if excess >= 1.0:  # 100% or more
            return 'critical'
        elif excess >= 0.5:  # 50-99%
            return 'high'
        elif excess >= 0.2:  # 20-49%
            return 'medium'
        else:
            return 'low'

    def get_active_alerts(self) -> List[Dict[str, Any]]:
        """Get currently active alerts."""
        with self._lock:
            active_alerts = []
            for rule_name, state in self.alert_states.items():
                if state['active']:
                    rule = self.rules[rule_name]
                    active_alerts.append({
                        'rule_name': rule_name,
                        'metric_name': rule.metric_name,
                        'threshold': rule.threshold,
                        'current_value': state['last_value'],
                        'triggered_at': state['triggered_at'].isoformat() if state['triggered_at'] else None,
                        'trigger_count': state['trigger_count']
                    })
            return active_alerts

    def get_alert_history(self, hours: int = 24) -> List[Dict[str, Any]]:
        """Get alert history for the last N hours.

        History is snapshotted under the lock, and entries without a
        parseable timestamp are skipped (the previous implementation
        raised ValueError on fromisoformat('')).
        """
        cutoff_time = datetime.now() - timedelta(hours=hours)

        with self._lock:
            history = list(self.alert_history)

        recent = []
        for alert in history:
            stamp = alert.get('triggered_at') or alert.get('resolved_at')
            if not stamp:
                continue
            try:
                when = datetime.fromisoformat(stamp)
            except ValueError:
                continue
            if when >= cutoff_time:
                recent.append(alert)
        return recent


class PerformanceMonitor:
    """
    Comprehensive real-time performance monitor for the AI Historical Simulation Platform.

    Features:
    - <200ms response time monitoring with alerts
    - Memory usage and CPU utilization tracking
    - System resource monitoring (disk, network, processes)
    - Custom metrics collection and analysis
    - Alert system for performance degradation
    - Historical performance data and trends
    - Real-time dashboard metrics
    """

    def __init__(self,
                 collection_interval: float = 1.0,
                 response_time_threshold_ms: float = 200.0,
                 memory_threshold_percent: float = 85.0,
                 cpu_threshold_percent: float = 80.0):
        """
        Initialize performance monitor.

        Args:
            collection_interval: Metrics collection interval in seconds
            response_time_threshold_ms: Response time alert threshold in milliseconds
            memory_threshold_percent: Memory usage alert threshold (percent)
            cpu_threshold_percent: CPU usage alert threshold (percent)
        """
        self.collection_interval = collection_interval
        self.response_time_threshold = response_time_threshold_ms
        # FIX: these two thresholds were previously accepted but never used
        # (the default alert rules hard-coded 80.0 / 85.0); they are now
        # stored and honored by _setup_default_alerts.
        self.memory_threshold = memory_threshold_percent
        self.cpu_threshold = cpu_threshold_percent

        # Core components
        self.aggregator = MetricsAggregator()
        self.alert_manager = AlertManager()

        # System monitoring
        self.system_snapshots: Deque[SystemSnapshot] = deque(maxlen=10000)
        self.process = psutil.Process()

        # Performance tracking (raw values only; timestamped copies live in
        # the aggregator under "response_time_ms")
        self.response_times: Deque[float] = deque(maxlen=10000)
        self.operation_counts = defaultdict(int)

        # Background tasks (started in initialize(), cancelled in shutdown())
        self.monitoring_task: Optional[asyncio.Task] = None
        self.analysis_task: Optional[asyncio.Task] = None
        self.shutdown_event = asyncio.Event()

        # Configuration
        self.metrics_file = Path("performance_metrics.jsonl")
        self.save_metrics_to_disk = True

        # Initialize default alert rules
        self._setup_default_alerts()

        logger.info(f"PerformanceMonitor initialized with {collection_interval}s interval")

    async def initialize(self) -> None:
        """Initialize performance monitoring and start background tasks."""
        logger.info("Initializing PerformanceMonitor")

        # Start background monitoring
        await self._start_monitoring_tasks()

        # Record initialization
        self.aggregator.add_metric("system.initialized", 1.0, {'component': 'performance_monitor'})

        logger.info("PerformanceMonitor initialized successfully")

    async def shutdown(self) -> None:
        """Shutdown performance monitoring, cancelling background tasks."""
        logger.info("Shutting down PerformanceMonitor")

        # Signal shutdown
        self.shutdown_event.set()

        # Cancel monitoring tasks
        if self.monitoring_task:
            self.monitoring_task.cancel()
            try:
                await self.monitoring_task
            except asyncio.CancelledError:
                pass

        if self.analysis_task:
            self.analysis_task.cancel()
            try:
                await self.analysis_task
            except asyncio.CancelledError:
                pass

        # Save final metrics
        await self._save_metrics_to_disk()

        logger.info("PerformanceMonitor shutdown completed")

    # Core monitoring methods

    async def record_metric(self, name: str, value: float,
                          metadata: Optional[Dict[str, Any]] = None) -> None:
        """Record a custom performance metric and check alerts on it."""
        self.aggregator.add_metric(name, value, metadata)

        # Check for immediate alerts
        current_metrics = {name: value}
        alerts = self.alert_manager.check_rules(current_metrics)

        if alerts:
            await self._handle_alerts(alerts)

    async def record_response_time(self, response_time_ms: float,
                                 operation: str = "general") -> None:
        """Record a response-time sample for monitoring."""
        self.response_times.append(response_time_ms)
        self.operation_counts[operation] += 1

        await self.record_metric(
            "response_time_ms",
            response_time_ms,
            {'operation': operation}
        )

        # Log warning for slow responses
        if response_time_ms > self.response_time_threshold:
            logger.warning(f"Slow response: {response_time_ms:.1f}ms for {operation} (threshold: {self.response_time_threshold}ms)")

    async def record_event(self, event_name: str, details: Optional[str] = None) -> None:
        """Record a system event as a 1.0-valued metric named 'event.<name>'."""
        await self.record_metric(
            f"event.{event_name}",
            1.0,
            {'details': details, 'timestamp': datetime.now().isoformat()}
        )

    # System metrics collection

    async def collect_system_metrics(self) -> SystemSnapshot:
        """Collect current system performance metrics into a SystemSnapshot.

        On any collection failure a zeroed snapshot is returned rather
        than propagating the exception.
        """
        try:
            # CPU usage. NOTE(review): interval=0.1 blocks the event loop
            # for 100ms per collection — confirm this is acceptable.
            cpu_percent = psutil.cpu_percent(interval=0.1)

            # Memory usage
            memory = psutil.virtual_memory()
            memory_percent = memory.percent
            memory_mb = memory.used / 1024 / 1024

            # Disk usage (root partition; POSIX-style path)
            disk = psutil.disk_usage('/')
            disk_percent = disk.percent

            # Network I/O (cumulative counters since boot)
            net_io = psutil.net_io_counters()
            network_io = {
                'bytes_sent': net_io.bytes_sent,
                'bytes_recv': net_io.bytes_recv,
                'packets_sent': net_io.packets_sent,
                'packets_recv': net_io.packets_recv
            }

            # Process information
            process_count = len(psutil.pids())

            # Thread count for current process
            try:
                thread_count = self.process.num_threads()
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                thread_count = 0

            snapshot = SystemSnapshot(
                timestamp=datetime.now(),
                cpu_percent=cpu_percent,
                memory_percent=memory_percent,
                memory_mb=memory_mb,
                disk_usage_percent=disk_percent,
                network_io=network_io,
                process_count=process_count,
                thread_count=thread_count
            )

            # Store snapshot
            self.system_snapshots.append(snapshot)

            # Record as individual metrics
            await self.record_metric("system.cpu_percent", cpu_percent)
            await self.record_metric("system.memory_percent", memory_percent)
            await self.record_metric("system.memory_mb", memory_mb)
            await self.record_metric("system.disk_percent", disk_percent)
            await self.record_metric("system.process_count", process_count)
            await self.record_metric("system.thread_count", thread_count)

            return snapshot

        except Exception as e:
            logger.error(f"Failed to collect system metrics: {e}")
            return SystemSnapshot(
                timestamp=datetime.now(),
                cpu_percent=0.0, memory_percent=0.0, memory_mb=0.0,
                disk_usage_percent=0.0, network_io={},
                process_count=0, thread_count=0
            )

    # Data retrieval and analysis

    async def get_current_metrics(self) -> Dict[str, Any]:
        """Get current performance metrics summary for dashboards."""
        current_time = datetime.now()

        # Response time statistics
        response_times = list(self.response_times)
        response_stats = {}
        if response_times:
            response_stats = {
                'count': len(response_times),
                'avg': mean(response_times),
                'median': median(response_times),
                'min': min(response_times),
                'max': max(response_times),
                'over_threshold': sum(1 for rt in response_times if rt > self.response_time_threshold)
            }

        # System metrics (latest snapshot only)
        system_stats = {}
        if self.system_snapshots:
            latest_snapshot = self.system_snapshots[-1]
            system_stats = {
                'cpu_percent': latest_snapshot.cpu_percent,
                'memory_percent': latest_snapshot.memory_percent,
                'memory_mb': latest_snapshot.memory_mb,
                'disk_percent': latest_snapshot.disk_usage_percent,
                'process_count': latest_snapshot.process_count,
                'thread_count': latest_snapshot.thread_count
            }

        # Alert information
        active_alerts = self.alert_manager.get_active_alerts()

        # Operation counts
        operation_summary = dict(self.operation_counts)

        return {
            'timestamp': current_time.isoformat(),
            'response_times': response_stats,
            'system': system_stats,
            'alerts': {
                'active_count': len(active_alerts),
                'active_alerts': active_alerts
            },
            'operations': operation_summary,
            'monitoring': {
                'collection_interval': self.collection_interval,
                'response_threshold_ms': self.response_time_threshold,
                'metrics_collected': sum(len(metrics) for metrics in self.aggregator.metrics.values())
            }
        }

    async def get_all_metrics(self) -> Dict[str, Any]:
        """Get comprehensive metrics including historical statistics/trends."""
        current = await self.get_current_metrics()

        # Add historical analysis for the key built-in metrics
        historical = {}
        for metric_name in ['response_time_ms', 'system.cpu_percent', 'system.memory_percent']:
            stats = self.aggregator.get_statistics(metric_name, minutes=60)
            trend = self.aggregator.get_trend(metric_name, minutes=60)
            historical[metric_name] = {
                'statistics': stats,
                'trend': trend
            }

        # Add alert history
        alert_history = self.alert_manager.get_alert_history(hours=24)

        return {
            **current,
            'historical': historical,
            'alert_history': alert_history,
            'system_snapshots_count': len(self.system_snapshots)
        }

    async def get_performance_report(self, hours: int = 24) -> Dict[str, Any]:
        """Generate a comprehensive performance report over the last N hours."""
        cutoff_time = datetime.now() - timedelta(hours=hours)

        # Response time analysis. FIX: the raw response_times deque carries
        # no timestamps, and the previous filter (list.index-based timedelta
        # arithmetic) was O(n^2), wrong for duplicate values, and unrelated
        # to when samples were actually recorded. Use the aggregator's
        # timestamped "response_time_ms" samples so the window is honored.
        recent_responses = [
            sample.value
            for sample in self.aggregator.get_metrics("response_time_ms", minutes=hours * 60)
        ]

        response_analysis = {}
        if recent_responses:
            ordered = sorted(recent_responses)  # sort once for both percentiles
            total = len(ordered)
            slow_count = sum(1 for rt in recent_responses if rt > self.response_time_threshold)
            response_analysis = {
                'total_requests': total,
                'avg_response_time': mean(recent_responses),
                'median_response_time': median(recent_responses),
                # min() clamps the index so small sample sets stay in range
                'p95_response_time': ordered[min(int(total * 0.95), total - 1)],
                'p99_response_time': ordered[min(int(total * 0.99), total - 1)],
                'slow_requests': slow_count,
                'slow_request_percentage': (slow_count / total) * 100
            }

        # System performance trends
        recent_snapshots = [s for s in self.system_snapshots if s.timestamp >= cutoff_time]
        system_analysis = {}
        if recent_snapshots:
            cpu_values = [s.cpu_percent for s in recent_snapshots]
            memory_values = [s.memory_percent for s in recent_snapshots]

            system_analysis = {
                'avg_cpu_percent': mean(cpu_values),
                'max_cpu_percent': max(cpu_values),
                'avg_memory_percent': mean(memory_values),
                'max_memory_percent': max(memory_values),
                'snapshot_count': len(recent_snapshots)
            }

        # Alert summary
        alert_history = self.alert_manager.get_alert_history(hours=hours)
        alert_analysis = {
            'total_alerts': len(alert_history),
            'alert_types': defaultdict(int),
            'critical_alerts': sum(1 for a in alert_history if a.get('severity') == 'critical')
        }

        for alert in alert_history:
            alert_analysis['alert_types'][alert.get('type', 'unknown')] += 1

        return {
            'report_period_hours': hours,
            'generated_at': datetime.now().isoformat(),
            'response_time_analysis': response_analysis,
            'system_analysis': system_analysis,
            # NOTE(review): only the per-type counts are exposed here; the
            # total/critical counts feed the grade below. Shape kept for
            # backward compatibility.
            'alert_analysis': dict(alert_analysis['alert_types']),
            'summary': {
                'performance_grade': self._calculate_performance_grade(response_analysis, system_analysis, alert_analysis),
                'recommendations': self._generate_recommendations(response_analysis, system_analysis, alert_analysis)
            }
        }

    # Alert configuration

    def add_alert_rule(self, rule: AlertRule) -> None:
        """Add custom alert rule."""
        self.alert_manager.add_rule(rule)
        logger.info(f"Added alert rule: {rule.name}")

    def remove_alert_rule(self, rule_name: str) -> None:
        """Remove alert rule."""
        self.alert_manager.remove_rule(rule_name)
        logger.info(f"Removed alert rule: {rule_name}")

    # Private methods

    def _setup_default_alerts(self) -> None:
        """Setup default performance alert rules from configured thresholds."""

        # Response time alert
        self.alert_manager.add_rule(AlertRule(
            name="high_response_time",
            metric_name="response_time_ms",
            threshold=self.response_time_threshold,
            comparison="gt",
            duration_seconds=30,
            callback=self._default_alert_callback
        ))

        # High CPU usage alert (honors cpu_threshold_percent)
        self.alert_manager.add_rule(AlertRule(
            name="high_cpu_usage",
            metric_name="system.cpu_percent",
            threshold=self.cpu_threshold,
            comparison="gt",
            duration_seconds=60,
            callback=self._default_alert_callback
        ))

        # High memory usage alert (honors memory_threshold_percent)
        self.alert_manager.add_rule(AlertRule(
            name="high_memory_usage",
            metric_name="system.memory_percent",
            threshold=self.memory_threshold,
            comparison="gt",
            duration_seconds=60,
            callback=self._default_alert_callback
        ))

        # Low disk space alert
        self.alert_manager.add_rule(AlertRule(
            name="low_disk_space",
            metric_name="system.disk_percent",
            threshold=90.0,
            comparison="gt",
            duration_seconds=300,  # 5 minutes
            callback=self._default_alert_callback
        ))

    def _default_alert_callback(self, rule_name: str, value: float, alert_data: Dict[str, Any]) -> None:
        """Default alert callback: log the alert at WARNING level."""
        severity = alert_data.get('severity', 'unknown')
        metric_name = alert_data.get('metric_name', 'unknown')

        logger.warning(f"PERFORMANCE ALERT [{severity.upper()}]: {rule_name} - {metric_name}={value}")

    async def _start_monitoring_tasks(self) -> None:
        """Start background monitoring tasks."""
        self.monitoring_task = asyncio.create_task(self._monitoring_loop())
        self.analysis_task = asyncio.create_task(self._analysis_loop())
        logger.info("Started performance monitoring tasks")

    async def _monitoring_loop(self) -> None:
        """Main monitoring loop: collect system metrics every interval."""
        while not self.shutdown_event.is_set():
            try:
                # Collect system metrics
                await self.collect_system_metrics()

                # Sleep for collection interval
                await asyncio.sleep(self.collection_interval)

            except Exception as e:
                logger.error(f"Monitoring loop error: {e}")
                await asyncio.sleep(self.collection_interval * 2)  # Wait longer on error

    async def _analysis_loop(self) -> None:
        """Analysis and alerting loop (runs every 30 seconds)."""
        while not self.shutdown_event.is_set():
            try:
                # Get current metrics for alert checking
                current_metrics = {}
                if self.system_snapshots:
                    latest = self.system_snapshots[-1]
                    current_metrics = {
                        'system.cpu_percent': latest.cpu_percent,
                        'system.memory_percent': latest.memory_percent,
                        'system.disk_percent': latest.disk_usage_percent
                    }

                # Add response time metrics
                if self.response_times:
                    current_metrics['response_time_ms'] = self.response_times[-1]

                # Check alert rules
                alerts = self.alert_manager.check_rules(current_metrics)
                if alerts:
                    await self._handle_alerts(alerts)

                # Save metrics periodically
                if self.save_metrics_to_disk:
                    await self._save_metrics_to_disk()

                # Analysis runs every 30 seconds
                await asyncio.sleep(30)

            except Exception as e:
                logger.error(f"Analysis loop error: {e}")
                await asyncio.sleep(60)  # Wait longer on error

    async def _handle_alerts(self, alerts: List[Dict[str, Any]]) -> None:
        """Log triggered/resolved alerts."""
        for alert in alerts:
            if alert['type'] == 'alert':
                logger.warning(f"Performance alert: {alert}")
            elif alert['type'] == 'resolved':
                logger.info(f"Alert resolved: {alert}")

    async def _save_metrics_to_disk(self) -> None:
        """Append a snapshot of recent metrics to the JSONL metrics file.

        NOTE: each call appends up to 1000 entries per metric, so the file
        grows without bound — acceptable for the simplified persistence
        noted below, but production storage should deduplicate.
        """
        try:
            # Save recent metrics (last 1000 entries per metric)
            metrics_data = {}
            for metric_name, metric_deque in self.aggregator.metrics.items():
                recent_metrics = list(metric_deque)[-1000:]  # Last 1000 entries
                metrics_data[metric_name] = [m.to_dict() for m in recent_metrics]

            # Append to JSONL file (simplified - would use proper storage in
            # production); open(..., 'a') creates the file if missing.
            with open(self.metrics_file, 'a') as f:
                json.dump({
                    'timestamp': datetime.now().isoformat(),
                    'metrics': metrics_data
                }, f)
                f.write('\n')

        except Exception as e:
            logger.error(f"Failed to save metrics to disk: {e}")

    def _calculate_performance_grade(self,
                                   response_analysis: Dict[str, Any],
                                   system_analysis: Dict[str, Any],
                                   alert_analysis: Dict[str, Any]) -> str:
        """Calculate overall performance grade (A-F) from a 100-point score."""
        score = 100

        # Response time penalties
        if response_analysis:
            avg_response = response_analysis.get('avg_response_time', 0)
            slow_percentage = response_analysis.get('slow_request_percentage', 0)

            if avg_response > self.response_time_threshold * 2:
                score -= 30
            elif avg_response > self.response_time_threshold:
                score -= 15

            if slow_percentage > 10:
                score -= 20
            elif slow_percentage > 5:
                score -= 10

        # System resource penalties
        if system_analysis:
            avg_cpu = system_analysis.get('avg_cpu_percent', 0)
            avg_memory = system_analysis.get('avg_memory_percent', 0)

            if avg_cpu > 80:
                score -= 15
            elif avg_cpu > 60:
                score -= 5

            if avg_memory > 85:
                score -= 15
            elif avg_memory > 70:
                score -= 5

        # Alert penalties (capped at 30 points)
        critical_alerts = alert_analysis.get('critical_alerts', 0)
        if critical_alerts > 0:
            score -= min(critical_alerts * 10, 30)

        # Convert score to grade
        if score >= 90:
            return 'A'
        elif score >= 80:
            return 'B'
        elif score >= 70:
            return 'C'
        elif score >= 60:
            return 'D'
        else:
            return 'F'

    def _generate_recommendations(self,
                                response_analysis: Dict[str, Any],
                                system_analysis: Dict[str, Any],
                                alert_analysis: Dict[str, Any]) -> List[str]:
        """Generate performance improvement recommendations."""
        recommendations = []

        # Response time recommendations
        if response_analysis:
            avg_response = response_analysis.get('avg_response_time', 0)
            slow_percentage = response_analysis.get('slow_request_percentage', 0)

            if avg_response > self.response_time_threshold:
                recommendations.append("Consider optimizing response generation algorithms")
                recommendations.append("Review database query performance and add caching")

            if slow_percentage > 10:
                recommendations.append("Implement response caching for frequently requested content")
                recommendations.append("Consider load balancing across multiple workers")

        # System resource recommendations
        if system_analysis:
            avg_cpu = system_analysis.get('avg_cpu_percent', 0)
            avg_memory = system_analysis.get('avg_memory_percent', 0)

            if avg_cpu > 70:
                recommendations.append("High CPU usage detected - consider scaling horizontally")
                recommendations.append("Profile CPU-intensive operations for optimization")

            if avg_memory > 80:
                recommendations.append("High memory usage - implement memory cleanup routines")
                recommendations.append("Consider reducing memory cache sizes")

        # Alert-based recommendations
        if alert_analysis.get('critical_alerts', 0) > 0:
            recommendations.append("Address critical alerts immediately")
            recommendations.append("Review alert thresholds and monitoring configuration")

        return recommendations if recommendations else ["System performing well - no recommendations"]


# Context manager for performance monitoring
@asynccontextmanager
async def managed_performance_monitor(**kwargs):
    """Async context manager for the performance monitor lifecycle.

    FIX: a bare ``async def`` containing ``yield`` is an async *generator*,
    not a context manager, so ``async with managed_performance_monitor()``
    previously raised TypeError. The ``@asynccontextmanager`` decorator
    makes the intended usage work:

        async with managed_performance_monitor() as monitor:
            ...

    kwargs are forwarded to PerformanceMonitor; shutdown() always runs,
    even if the body raises.
    """
    monitor = PerformanceMonitor(**kwargs)
    try:
        await monitor.initialize()
        yield monitor
    finally:
        await monitor.shutdown()