"""
自适应性能控制器
基于系统负载动态调整处理参数，实现智能性能优化
支持帧率调整、任务优先级管理和资源分配优化
"""
import asyncio
import logging
import time
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Tuple
from dataclasses import dataclass, asdict
from enum import Enum
import statistics
from collections import deque, defaultdict

from core.config import get_settings

logger = logging.getLogger(__name__)
settings = get_settings()


class PerformanceLevel(Enum):
    """Performance level classification derived from the overall load score."""
    CRITICAL = "critical"    # System overloaded; emergency adjustments required
    HIGH = "high"           # High load; optimization needed
    NORMAL = "normal"       # Normal load
    LOW = "low"            # Low load; performance can be increased
    IDLE = "idle"          # Idle state


class TaskPriority(Enum):
    """Task priority levels; a LOWER numeric value means a HIGHER priority."""
    EMERGENCY = 1    # Emergency tasks (safety-related)
    HIGH = 2        # High-priority tasks
    NORMAL = 3      # Regular tasks
    LOW = 4         # Low-priority tasks
    BACKGROUND = 5  # Background tasks


@dataclass
class SystemLoad:
    """A single point-in-time snapshot of system load metrics."""
    timestamp: datetime
    cpu_percent: float
    memory_percent: float
    gpu_utilization: float
    gpu_memory_percent: float
    active_streams: int
    processing_fps: float
    queue_length: int
    response_time_ms: float

    @property
    def overall_load_score(self) -> float:
        """Weighted composite load score, capped at 100.

        CPU, memory, GPU utilization, GPU memory and queue backlog are
        combined with fixed weights summing to 1.0.
        """
        # Normalise queue length assuming a maximum backlog of 100 entries.
        queue_score = min(self.queue_length / 100.0 * 100, 100)

        # Weighted components, accumulated in a fixed order.
        components = [
            0.25 * self.cpu_percent,        # CPU weight
            0.20 * self.memory_percent,     # memory weight
            0.30 * self.gpu_utilization,    # GPU-compute weight (largest)
            0.15 * self.gpu_memory_percent, # GPU-memory weight
            0.10 * queue_score,             # queue-backlog weight
        ]

        return min(sum(components), 100.0)


@dataclass
class PerformanceConfig:
    """Tunable performance configuration parameters (mutated at runtime by the controller)."""
    target_fps: float = 3.0              # desired processing frame rate
    max_concurrent_streams: int = 50     # cap on simultaneously processed streams
    batch_size: int = 4                  # inference batch size
    processing_timeout: int = 30         # per-item processing timeout (seconds)
    queue_size_limit: int = 100          # maximum queued work items
    memory_threshold: float = 80.0       # memory-usage alarm threshold (%)
    cpu_threshold: float = 80.0          # CPU-usage alarm threshold (%)
    gpu_threshold: float = 85.0          # GPU-usage alarm threshold (%)
    
    def to_dict(self) -> Dict[str, Any]:
        """Return the configuration as a plain dict (field name -> value)."""
        return asdict(self)


@dataclass
class AdaptiveAction:
    """Record of a single adaptive adjustment made by the controller."""
    action_type: str         # kind of adjustment, e.g. "fps_adjustment"
    parameter: str           # name of the parameter that was changed
    old_value: Any           # value before the adjustment
    new_value: Any           # value after the adjustment
    reason: str              # human-readable justification
    timestamp: datetime      # when the adjustment happened
    expected_impact: str     # expected effect on the system


class AdaptivePerformanceController:
    """Adaptive performance controller.

    Tracks a rolling window of system load samples and dynamically tunes
    processing parameters (frame rate, batch size, task priorities,
    concurrency limits) to keep the system within its capacity envelope.
    Two background asyncio loops (monitoring and adjustment) are started
    by :meth:`initialize` and cancelled by :meth:`shutdown`.
    """

    def __init__(self):
        # Active configuration plus a pristine copy used for resets.
        self.config = PerformanceConfig()
        self.original_config = PerformanceConfig()

        # Rolling histories: ~5 minutes of load samples (one per second)
        # and the most recent adaptive adjustments.
        self.load_history: deque = deque(maxlen=300)
        self.performance_history: deque = deque(maxlen=100)

        # Current controller state.
        self.current_performance_level = PerformanceLevel.NORMAL
        self.last_adjustment_time = datetime.now()
        self.adjustment_cooldown = 30  # seconds between automatic optimizations

        # Task bookkeeping: task_id -> task info / priority.
        self.active_tasks: Dict[str, Dict[str, Any]] = {}
        self.paused_tasks: Dict[str, Dict[str, Any]] = {}
        self.task_priorities: Dict[str, TaskPriority] = {}

        # Overall-load-score thresholds that map to performance levels.
        self.thresholds = {
            PerformanceLevel.CRITICAL: 90.0,
            PerformanceLevel.HIGH: 75.0,
            PerformanceLevel.NORMAL: 50.0,
            PerformanceLevel.LOW: 25.0,
            PerformanceLevel.IDLE: 10.0
        }

        # Tunable adaptation parameters.
        self.adaptive_params = {
            'fps_adjustment_factor': 0.8,      # FPS reduction factor under high load
            'batch_size_factor': 0.7,          # batch-size reduction factor
            'timeout_extension_factor': 1.5,   # timeout extension factor
            'memory_pressure_threshold': 0.85, # memory pressure threshold
            'response_time_threshold': 5000,   # response-time threshold (ms)
        }

        # BUG FIX: optimize_resource_allocation() writes to this dict, but it
        # was never initialized, raising AttributeError on first use.
        self.adaptive_thresholds = {
            'utilization_warning': 80.0,  # dynamic CPU-utilization warning threshold
        }

        # Background asyncio tasks (created in initialize()).
        self._monitoring_task = None
        self._adjustment_task = None

        # Cumulative statistics.
        self.stats = {
            'total_adjustments': 0,
            'fps_adjustments': 0,
            'batch_adjustments': 0,
            'task_pauses': 0,
            'task_resumes': 0,
            'performance_improvements': 0,
            'last_optimization_time': None
        }

    async def initialize(self):
        """Start the background monitoring and adjustment loops.

        Raises:
            Exception: re-raised if the background tasks cannot be created.
        """
        try:
            self._monitoring_task = asyncio.create_task(self._monitoring_loop())
            self._adjustment_task = asyncio.create_task(self._adjustment_loop())

            logger.info("Adaptive Performance Controller initialized")

        except Exception as e:
            logger.error(f"Failed to initialize Adaptive Performance Controller: {e}")
            raise

    async def update_system_load(self, load: SystemLoad):
        """Record a new load sample and refresh the current performance level."""
        try:
            self.load_history.append(load)

            # Recompute the performance level; log only on transitions.
            new_level = self._calculate_performance_level(load)
            if new_level != self.current_performance_level:
                logger.info(f"Performance level changed: {self.current_performance_level.value} -> {new_level.value}")
                self.current_performance_level = new_level

        except Exception as e:
            logger.error(f"Failed to update system load: {e}")

    def _calculate_performance_level(self, load: SystemLoad) -> PerformanceLevel:
        """Map an overall load score onto a PerformanceLevel via the thresholds."""
        overall_score = load.overall_load_score

        if overall_score >= self.thresholds[PerformanceLevel.CRITICAL]:
            return PerformanceLevel.CRITICAL
        elif overall_score >= self.thresholds[PerformanceLevel.HIGH]:
            return PerformanceLevel.HIGH
        elif overall_score >= self.thresholds[PerformanceLevel.NORMAL]:
            return PerformanceLevel.NORMAL
        elif overall_score >= self.thresholds[PerformanceLevel.LOW]:
            return PerformanceLevel.LOW
        else:
            return PerformanceLevel.IDLE

    async def adjust_frame_rate(self, camera_id: str, current_fps: float) -> float:
        """Dynamically adjust the processing frame rate for one camera.

        Args:
            camera_id: identifier used only for logging/bookkeeping.
            current_fps: the camera's current processing frame rate.

        Returns:
            The new target FPS (equal to ``current_fps`` when no load
            history exists or an internal error occurs).
        """
        try:
            if not self.load_history:
                return current_fps

            target_fps = current_fps
            # BUG FIX: 'reason' could previously be unbound (NameError) when
            # the level was NORMAL but the clamp below still changed the FPS.
            reason = "FPS clamped to allowed range"

            # Scale the FPS according to the current performance level.
            if self.current_performance_level == PerformanceLevel.CRITICAL:
                # Emergency: cut the frame rate in half.
                target_fps = current_fps * 0.5
                reason = "Critical system load detected"

            elif self.current_performance_level == PerformanceLevel.HIGH:
                # High load: moderate reduction.
                target_fps = current_fps * self.adaptive_params['fps_adjustment_factor']
                reason = "High system load detected"

            elif self.current_performance_level == PerformanceLevel.LOW:
                # Low load: allow a modest increase, capped at 1.5x optimum.
                max_fps = settings.VIDEO_PROCESSING_FPS * 1.5
                target_fps = min(current_fps * 1.2, max_fps)
                reason = "Low system load, increasing performance"

            elif self.current_performance_level == PerformanceLevel.IDLE:
                # Idle: restore the configured optimal frame rate.
                target_fps = settings.VIDEO_PROCESSING_FPS
                reason = "System idle, restoring optimal performance"

            # Clamp to [0.5, 2x the configured optimum].
            target_fps = max(0.5, min(target_fps, settings.VIDEO_PROCESSING_FPS * 2))

            # Only record/log adjustments larger than 0.1 FPS.
            if abs(target_fps - current_fps) > 0.1:
                action = AdaptiveAction(
                    action_type="fps_adjustment",
                    parameter=f"camera_{camera_id}_fps",
                    old_value=current_fps,
                    new_value=target_fps,
                    reason=reason,
                    timestamp=datetime.now(),
                    expected_impact="Reduce/increase processing load"
                )

                self.performance_history.append(action)
                self.stats['fps_adjustments'] += 1
                self.stats['total_adjustments'] += 1

                logger.info(f"Adjusted FPS for camera {camera_id}: {current_fps:.1f} -> {target_fps:.1f} ({reason})")

            return target_fps

        except Exception as e:
            logger.error(f"Failed to adjust frame rate for camera {camera_id}: {e}")
            return current_fps

    async def adjust_batch_size(self, current_batch_size: int) -> int:
        """Dynamically adjust the inference batch size based on GPU pressure.

        Returns the new batch size (>= 1), or ``current_batch_size`` when no
        load history exists or an internal error occurs.
        """
        try:
            if not self.load_history:
                return current_batch_size

            latest_load = self.load_history[-1]
            target_batch_size = current_batch_size
            reason = ""

            # Scale the batch size according to GPU memory pressure.
            if latest_load.gpu_memory_percent > 90:
                # GPU memory critically low: halve the batch size.
                target_batch_size = max(1, int(current_batch_size * 0.5))
                reason = "GPU memory critical"

            elif latest_load.gpu_memory_percent > 80:
                # GPU memory tight: apply the configured reduction factor.
                target_batch_size = max(1, int(current_batch_size * self.adaptive_params['batch_size_factor']))
                reason = "GPU memory high"

            elif latest_load.gpu_memory_percent < 50 and latest_load.gpu_utilization < 70:
                # GPU headroom available: grow the batch by one, up to an
                # assumed maximum derived from the stream limit.
                max_batch_size = settings.MAX_CONCURRENT_STREAMS // 5
                target_batch_size = min(current_batch_size + 1, max_batch_size)
                reason = "GPU resources available"

            if target_batch_size != current_batch_size:
                action = AdaptiveAction(
                    action_type="batch_size_adjustment",
                    parameter="batch_size",
                    old_value=current_batch_size,
                    new_value=target_batch_size,
                    reason=reason,
                    timestamp=datetime.now(),
                    expected_impact="Optimize GPU memory usage"
                )

                self.performance_history.append(action)
                self.stats['batch_adjustments'] += 1
                self.stats['total_adjustments'] += 1

                logger.info(f"Adjusted batch size: {current_batch_size} -> {target_batch_size} ({reason})")

            return target_batch_size

        except Exception as e:
            logger.error(f"Failed to adjust batch size: {e}")
            return current_batch_size

    async def manage_task_priority(self, task_id: str, task_type: str, current_priority: TaskPriority) -> TaskPriority:
        """Adjust a task's priority based on system load and task type.

        May pause or resume the task as a side effect. Returns the adjusted
        priority (or ``current_priority`` on error).
        """
        try:
            adjusted_priority = current_priority

            if self.current_performance_level == PerformanceLevel.CRITICAL:
                # Emergency: demote and pause low-priority work.
                if current_priority in [TaskPriority.LOW, TaskPriority.BACKGROUND]:
                    adjusted_priority = TaskPriority.BACKGROUND
                    await self._pause_task(task_id, "Critical system load")

            elif self.current_performance_level == PerformanceLevel.HIGH:
                # High load: demote LOW tasks to BACKGROUND.
                if current_priority == TaskPriority.LOW:
                    adjusted_priority = TaskPriority.BACKGROUND

            elif self.current_performance_level in [PerformanceLevel.LOW, PerformanceLevel.IDLE]:
                # Load has eased: resume this task if it was paused.
                if task_id in self.paused_tasks:
                    await self._resume_task(task_id, "System load decreased")

            # Type-based overrides take precedence over load-based changes.
            if task_type in ["safety_detection", "emergency_alert"]:
                adjusted_priority = TaskPriority.EMERGENCY
            elif task_type in ["crowd_analysis", "behavior_detection"]:
                # BUG FIX: the old code used max() on plain Enum members,
                # which raises TypeError (Enum has no ordering). Elevate to
                # HIGH only when the current priority is weaker (larger value).
                if adjusted_priority.value > TaskPriority.HIGH.value:
                    adjusted_priority = TaskPriority.HIGH

            self.task_priorities[task_id] = adjusted_priority
            return adjusted_priority

        except Exception as e:
            logger.error(f"Failed to manage task priority for {task_id}: {e}")
            return current_priority

    async def _pause_task(self, task_id: str, reason: str):
        """Move an active task into the paused set, recording when and why."""
        try:
            if task_id in self.active_tasks and task_id not in self.paused_tasks:
                task_info = self.active_tasks[task_id]
                task_info['paused_at'] = datetime.now()
                task_info['pause_reason'] = reason

                self.paused_tasks[task_id] = task_info

                self.stats['task_pauses'] += 1
                logger.info(f"Paused task {task_id}: {reason}")

        except Exception as e:
            logger.error(f"Failed to pause task {task_id}: {e}")

    async def _resume_task(self, task_id: str, reason: str):
        """Move a paused task back into the active set, recording when and why."""
        try:
            if task_id in self.paused_tasks:
                task_info = self.paused_tasks.pop(task_id)
                task_info['resumed_at'] = datetime.now()
                task_info['resume_reason'] = reason

                self.active_tasks[task_id] = task_info

                self.stats['task_resumes'] += 1
                logger.info(f"Resumed task {task_id}: {reason}")

        except Exception as e:
            logger.error(f"Failed to resume task {task_id}: {e}")

    async def optimize_resource_allocation(self) -> Dict[str, Any]:
        """Analyse recent load and apply multi-dimensional optimizations.

        Examines the last 60 load samples, computes averages and trend
        slopes, and then adjusts concurrency, batch size, FPS, timeouts
        and task pausing as needed. Returns a report dict describing the
        actions taken and estimated impact (empty-actions dict on error or
        insufficient history).
        """
        try:
            optimization_results = {
                'actions_taken': [],
                'performance_improvement': 0.0,
                'resource_savings': {},
                'optimization_timestamp': datetime.now().isoformat(),
                'system_state_before': {},
                'system_state_after': {}
            }

            if not self.load_history:
                return optimization_results

            # Need at least 10 of the last 60 seconds of samples to act.
            recent_loads = list(self.load_history)[-60:]
            if len(recent_loads) < 10:
                return optimization_results

            # Snapshot the pre-optimization system state.
            latest_load = recent_loads[-1]
            optimization_results['system_state_before'] = {
                'cpu_percent': latest_load.cpu_percent,
                'memory_percent': latest_load.memory_percent,
                'gpu_utilization': latest_load.gpu_utilization,
                'active_streams': latest_load.active_streams,
                'processing_fps': latest_load.processing_fps,
                'queue_length': latest_load.queue_length
            }

            # Average load over the window.
            avg_cpu = statistics.mean([load.cpu_percent for load in recent_loads])
            avg_memory = statistics.mean([load.memory_percent for load in recent_loads])
            avg_gpu = statistics.mean([load.gpu_utilization for load in recent_loads])
            avg_response_time = statistics.mean([load.response_time_ms for load in recent_loads])

            # Trend slopes over the most recent 30 samples.
            cpu_trend = self._calculate_trend_slope([load.cpu_percent for load in recent_loads[-30:]])
            memory_trend = self._calculate_trend_slope([load.memory_percent for load in recent_loads[-30:]])
            gpu_trend = self._calculate_trend_slope([load.gpu_utilization for load in recent_loads[-30:]])

            # --- CPU: reduce concurrency when load is high or rising fast ---
            if avg_cpu > 85 or (avg_cpu > 75 and cpu_trend > 0.5):
                # Scale the reduction with how far above 75% we are.
                optimization_factor = min(0.9, max(0.6, 1.0 - (avg_cpu - 75) / 100))
                new_max_streams = max(10, int(self.config.max_concurrent_streams * optimization_factor))

                if new_max_streams != self.config.max_concurrent_streams:
                    # BUG FIX: compute the delta BEFORE mutating the config;
                    # the old code always recorded 0 streams reduced.
                    streams_reduced = self.config.max_concurrent_streams - new_max_streams
                    optimization_results['actions_taken'].append({
                        'action': 'reduce_concurrent_streams',
                        'old_value': self.config.max_concurrent_streams,
                        'new_value': new_max_streams,
                        'reason': f'CPU负载过高: {avg_cpu:.1f}%, 趋势: {cpu_trend:.2f}',
                        'optimization_factor': optimization_factor
                    })
                    self.config.max_concurrent_streams = new_max_streams
                    optimization_results['resource_savings']['cpu_streams_reduced'] = streams_reduced

            # --- Memory: shrink the batch size under memory pressure ---
            if avg_memory > 80 or (avg_memory > 70 and memory_trend > 0.3):
                # Pressure normalised to roughly [0, 1] over the 60-100% band.
                memory_pressure = (avg_memory - 60) / 40
                batch_reduction_factor = max(0.5, 1.0 - memory_pressure * 0.4)
                new_batch_size = max(1, int(self.config.batch_size * batch_reduction_factor))

                if new_batch_size != self.config.batch_size:
                    # BUG FIX: estimate the saving BEFORE mutating the config;
                    # the old code always recorded 0 MB saved.
                    # Rough estimate: ~50 MB per batch slot.
                    memory_saved_mb = (self.config.batch_size - new_batch_size) * 50
                    optimization_results['actions_taken'].append({
                        'action': 'reduce_batch_size',
                        'old_value': self.config.batch_size,
                        'new_value': new_batch_size,
                        'reason': f'内存压力过高: {avg_memory:.1f}%, 趋势: {memory_trend:.2f}',
                        'memory_pressure': memory_pressure
                    })
                    self.config.batch_size = new_batch_size
                    optimization_results['resource_savings']['memory_saved_mb'] = memory_saved_mb

            # --- GPU: lower the FPS and, in extremis, pause low-priority tasks ---
            if avg_gpu > 90 or (avg_gpu > 80 and gpu_trend > 0.4):
                # GPU pressure normalised over the 70-100% band.
                gpu_pressure = (avg_gpu - 70) / 30

                # 1. Reduce the target processing frame rate.
                fps_reduction_factor = max(0.6, 1.0 - gpu_pressure * 0.3)
                new_fps = max(1.0, self.config.target_fps * fps_reduction_factor)

                if abs(new_fps - self.config.target_fps) > 0.1:
                    optimization_results['actions_taken'].append({
                        'action': 'reduce_target_fps',
                        'old_value': self.config.target_fps,
                        'new_value': new_fps,
                        'reason': f'GPU负载过高: {avg_gpu:.1f}%, 趋势: {gpu_trend:.2f}',
                        'gpu_pressure': gpu_pressure
                    })
                    self.config.target_fps = new_fps

                # 2. If the GPU is critically loaded, pause up to 5 low-priority tasks.
                if avg_gpu > 95:
                    low_priority_tasks = [task_id for task_id, priority in self.task_priorities.items()
                                        if priority in [TaskPriority.LOW, TaskPriority.BACKGROUND]]

                    paused_count = 0
                    for task_id in low_priority_tasks[:min(5, len(low_priority_tasks))]:
                        if task_id not in self.paused_tasks:
                            await self._pause_task(task_id, "GPU负载临界，暂停低优先级任务")
                            paused_count += 1

                    if paused_count > 0:
                        optimization_results['actions_taken'].append({
                            'action': 'pause_low_priority_tasks',
                            'old_value': 0,
                            'new_value': paused_count,
                            'reason': f'GPU负载临界: {avg_gpu:.1f}%'
                        })

            # --- Latency: extend the timeout when the tail latency is high ---
            if avg_response_time > self.adaptive_params['response_time_threshold']:
                response_times = [load.response_time_ms for load in recent_loads]
                # BUG FIX: previously called np.percentile although numpy was
                # never imported (NameError, swallowed by the broad except).
                p95_latency = self._percentile(response_times, 95) if response_times else avg_response_time

                if p95_latency > self.adaptive_params['response_time_threshold'] * 1.5:
                    timeout_multiplier = min(2.0, p95_latency / self.adaptive_params['response_time_threshold'])
                    new_timeout = int(self.config.processing_timeout * timeout_multiplier)

                    if new_timeout != self.config.processing_timeout:
                        optimization_results['actions_taken'].append({
                            'action': 'increase_timeout',
                            'old_value': self.config.processing_timeout,
                            'new_value': new_timeout,
                            'reason': f'P95延迟过高: {p95_latency:.1f}ms',
                            'p95_latency': p95_latency
                        })
                        self.config.processing_timeout = new_timeout

            # --- Queue backlog: suggest more workers if resources allow ---
            avg_queue_length = statistics.mean([load.queue_length for load in recent_loads])
            if avg_queue_length > 50:
                queue_pressure = min(1.0, avg_queue_length / 100)

                # Only propose extra workers when CPU and memory have headroom.
                if avg_cpu < 80 and avg_memory < 75:
                    additional_workers = min(2, int(queue_pressure * 4))
                    optimization_results['actions_taken'].append({
                        'action': 'increase_workers',
                        # NOTE(review): assumed mapping of streams to worker
                        # threads (1 worker per 50 streams) — confirm.
                        'old_value': self.config.max_concurrent_streams // 50,
                        'new_value': (self.config.max_concurrent_streams // 50) + additional_workers,
                        'reason': f'队列积压严重: {avg_queue_length:.0f}',
                        'queue_pressure': queue_pressure
                    })

            # --- Adaptive warning threshold, from the CPU P90 over a full minute ---
            if len(recent_loads) >= 60:
                cpu_values = [load.cpu_percent for load in recent_loads]
                # BUG FIX: np.percentile replaced with the stdlib-based helper.
                cpu_p90 = self._percentile(cpu_values, 90)

                if cpu_p90 < 70:  # P90 consistently low: raise the threshold
                    self.adaptive_thresholds['utilization_warning'] = min(90, cpu_p90 + 10)
                elif cpu_p90 > 85:  # P90 high: lower the threshold
                    self.adaptive_thresholds['utilization_warning'] = max(70, cpu_p90 - 5)

            # Summarise the expected impact of whatever was done.
            if optimization_results['actions_taken']:
                self.stats['performance_improvements'] += 1
                self.stats['last_optimization_time'] = datetime.now()

                # Heuristic improvement score per action type.
                improvement_score = 0
                for action in optimization_results['actions_taken']:
                    if action['action'] == 'reduce_concurrent_streams':
                        improvement_score += 15  # cutting concurrency helps most
                    elif action['action'] == 'reduce_batch_size':
                        improvement_score += 10
                    elif action['action'] == 'reduce_target_fps':
                        improvement_score += 12
                    elif action['action'] == 'pause_low_priority_tasks':
                        improvement_score += 8
                    else:
                        improvement_score += 5

                optimization_results['performance_improvement'] = min(50, improvement_score)

                # Predicted post-optimization state (heuristic estimates).
                optimization_results['system_state_after'] = {
                    'expected_cpu_reduction': min(20, improvement_score * 0.4),
                    'expected_memory_reduction': min(15, improvement_score * 0.3),
                    'expected_gpu_reduction': min(25, improvement_score * 0.5),
                    'expected_latency_improvement': min(30, improvement_score * 0.6)
                }

            return optimization_results

        except Exception as e:
            logger.error(f"Failed to optimize resource allocation: {e}")
            return {
                'actions_taken': [],
                'performance_improvement': 0.0,
                'resource_savings': {},
                'error': str(e)
            }

    async def get_performance_recommendations(self) -> List[Dict[str, Any]]:
        """Produce human-readable optimization recommendations.

        Returns a (possibly empty) list of dicts, each with ``type``,
        ``priority``, ``description`` and ``suggestions`` keys.
        """
        recommendations = []

        try:
            if not self.load_history:
                return recommendations

            # Analyse the last minute of samples; need at least 10.
            recent_loads = list(self.load_history)[-60:]
            if len(recent_loads) < 10:
                return recommendations

            cpu_trend = [load.cpu_percent for load in recent_loads]
            memory_trend = [load.memory_percent for load in recent_loads]
            gpu_trend = [load.gpu_utilization for load in recent_loads]

            # Sustained high CPU.
            if statistics.mean(cpu_trend) > 80:
                recommendations.append({
                    'type': 'cpu_optimization',
                    'priority': 'high',
                    'description': 'CPU使用率持续过高',
                    'suggestions': [
                        '减少并发处理流数量',
                        '优化AI模型推理效率',
                        '考虑增加CPU资源'
                    ]
                })

            # Sustained high memory.
            if statistics.mean(memory_trend) > 75:
                recommendations.append({
                    'type': 'memory_optimization',
                    'priority': 'high',
                    'description': '内存使用率过高',
                    'suggestions': [
                        '减少帧缓冲区大小',
                        '优化缓存策略',
                        '清理不必要的数据'
                    ]
                })

            # Sustained high GPU.
            if statistics.mean(gpu_trend) > 85:
                recommendations.append({
                    'type': 'gpu_optimization',
                    'priority': 'critical',
                    'description': 'GPU利用率过高',
                    'suggestions': [
                        '降低处理帧率',
                        '减少批处理大小',
                        '优化GPU内存使用'
                    ]
                })

            # Rising CPU trend over the last 30 samples.
            if len(cpu_trend) > 30:
                cpu_slope = self._calculate_trend_slope(cpu_trend[-30:])
                if cpu_slope > 1.0:
                    recommendations.append({
                        'type': 'trend_warning',
                        'priority': 'medium',
                        'description': 'CPU使用率呈上升趋势',
                        'suggestions': [
                            '监控系统负载变化',
                            '准备性能调整措施',
                            '检查是否有异常任务'
                        ]
                    })

            return recommendations

        except Exception as e:
            logger.error(f"Failed to get performance recommendations: {e}")
            return recommendations

    def _calculate_trend_slope(self, values: List[float]) -> float:
        """Return the least-squares slope of *values* against their indices.

        Returns 0.0 for fewer than two points or a degenerate denominator.
        """
        if len(values) < 2:
            return 0.0

        n = len(values)
        x = list(range(n))

        # Simple linear regression: slope = cov(x, y) / var(x).
        x_mean = sum(x) / n
        y_mean = sum(values) / n

        numerator = sum((x[i] - x_mean) * (values[i] - y_mean) for i in range(n))
        denominator = sum((x[i] - x_mean) ** 2 for i in range(n))

        if denominator == 0:
            return 0.0

        return numerator / denominator

    @staticmethod
    def _percentile(values: List[float], pct: float) -> float:
        """Return the *pct*-th percentile of *values* (linear interpolation).

        Matches numpy.percentile's default 'linear' method; returns 0.0 for
        an empty input. Added so the controller has no numpy dependency.
        """
        if not values:
            return 0.0
        ordered = sorted(values)
        if len(ordered) == 1:
            return ordered[0]
        # Fractional rank into the sorted sample.
        rank = (len(ordered) - 1) * pct / 100.0
        lo = int(rank)
        hi = min(lo + 1, len(ordered) - 1)
        frac = rank - lo
        return ordered[lo] + (ordered[hi] - ordered[lo]) * frac

    async def _monitoring_loop(self):
        """Background loop: log a performance summary every 30 seconds."""
        while True:
            try:
                if self.load_history:
                    latest_load = self.load_history[-1]

                    logger.debug(f"Performance Status - Level: {self.current_performance_level.value}, "
                               f"Load Score: {latest_load.overall_load_score:.1f}, "
                               f"Active Tasks: {len(self.active_tasks)}, "
                               f"Paused Tasks: {len(self.paused_tasks)}")

                await asyncio.sleep(30)

            except Exception as e:
                logger.error(f"Error in performance monitoring loop: {e}")
                await asyncio.sleep(30)

    async def _adjustment_loop(self):
        """Background loop: run automatic optimization when load stays high.

        Every 60 seconds, and only outside the cooldown window, triggers
        optimize_resource_allocation() if at least 7 of the last 10 samples
        exceeded the HIGH threshold.
        """
        while True:
            try:
                current_time = datetime.now()

                # Respect the cooldown between automatic adjustments.
                if (current_time - self.last_adjustment_time).total_seconds() >= self.adjustment_cooldown:

                    if (self.current_performance_level in [PerformanceLevel.CRITICAL, PerformanceLevel.HIGH] and
                        len(self.load_history) >= 10):

                        # Require sustained high load (70% of the last 10 samples).
                        recent_loads = list(self.load_history)[-10:]
                        high_load_count = sum(1 for load in recent_loads
                                            if load.overall_load_score > self.thresholds[PerformanceLevel.HIGH])

                        if high_load_count >= 7:
                            optimization_results = await self.optimize_resource_allocation()
                            if optimization_results['actions_taken']:
                                self.last_adjustment_time = current_time
                                logger.info(f"Performed automatic optimization: {len(optimization_results['actions_taken'])} actions")

                await asyncio.sleep(60)

            except Exception as e:
                logger.error(f"Error in performance adjustment loop: {e}")
                await asyncio.sleep(60)

    async def get_controller_stats(self) -> Dict[str, Any]:
        """Return a snapshot of controller statistics and recent load averages."""
        current_time = datetime.now()

        # Average the last minute of load samples, if any exist.
        load_stats = {}
        if self.load_history:
            recent_loads = list(self.load_history)[-60:]
            load_stats = {
                'avg_cpu_percent': statistics.mean([load.cpu_percent for load in recent_loads]),
                'avg_memory_percent': statistics.mean([load.memory_percent for load in recent_loads]),
                'avg_gpu_utilization': statistics.mean([load.gpu_utilization for load in recent_loads]),
                'avg_response_time_ms': statistics.mean([load.response_time_ms for load in recent_loads]),
                'current_performance_level': self.current_performance_level.value
            }

        return {
            'controller_stats': self.stats,
            'current_config': self.config.to_dict(),
            'original_config': self.original_config.to_dict(),
            'load_stats': load_stats,
            'active_tasks': len(self.active_tasks),
            'paused_tasks': len(self.paused_tasks),
            'recent_adjustments': [asdict(action) for action in list(self.performance_history)[-10:]],
            # NOTE(review): despite the key name, this is the time since the
            # last automatic adjustment, not process uptime; key kept for
            # backward compatibility with existing consumers.
            'uptime_seconds': (current_time - self.last_adjustment_time).total_seconds()
        }

    async def reset_to_defaults(self):
        """Restore the default configuration and resume all paused tasks."""
        try:
            self.config = PerformanceConfig()
            self.current_performance_level = PerformanceLevel.NORMAL

            # Resume everything that was paused (copy keys: dict mutates).
            paused_task_ids = list(self.paused_tasks.keys())
            for task_id in paused_task_ids:
                await self._resume_task(task_id, "Configuration reset")

            logger.info("Performance controller reset to defaults")

        except Exception as e:
            logger.error(f"Failed to reset performance controller: {e}")

    async def shutdown(self):
        """Cancel the background loops and resume all paused tasks."""
        try:
            if self._monitoring_task:
                self._monitoring_task.cancel()
            if self._adjustment_task:
                self._adjustment_task.cancel()

            # Resume everything that was paused (copy keys: dict mutates).
            paused_task_ids = list(self.paused_tasks.keys())
            for task_id in paused_task_ids:
                await self._resume_task(task_id, "System shutdown")

            logger.info("Adaptive Performance Controller shutdown completed")

        except Exception as e:
            logger.error(f"Error during performance controller shutdown: {e}")


# Module-level singleton instance of the adaptive performance controller.
adaptive_performance_controller = AdaptivePerformanceController()