"""
GPU资源管理器服务
负责GPU资源的检测、分配、监控和优化
增强版本支持多GPU调度、动态内存分配和实时性能监控
"""
import asyncio
import logging
import psutil
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Set, Tuple
from uuid import uuid4
import subprocess
import json
import threading
from collections import defaultdict, deque

try:
    import GPUtil
    import pynvml
    NVIDIA_AVAILABLE = True
except ImportError:
    NVIDIA_AVAILABLE = False
    logging.warning("NVIDIA GPU libraries not available. GPU monitoring will be limited.")

from schemas.gpu_resource import (
    GPUInfo, GPUStatus, GPUResourceRequest, GPUResourceAllocation,
    AITask, TaskStatus, TaskPriority, GPUClusterInfo, ResourceUsageStats,
    GPUHealthCheck, ResourceOptimizationSuggestion, LoadBalancingConfig
)
from core.config import get_settings

logger = logging.getLogger(__name__)
settings = get_settings()


class EnhancedGPUResourceManager:
    """增强的GPU资源管理器 - 支持大规模实时视频处理"""
    
    def __init__(self):
        """Set up in-memory state for GPU tracking, pooling and monitoring.

        No hardware is touched here except NVML initialization at the end;
        actual GPU detection is deferred to ``initialize()``.
        """
        self.gpus: Dict[int, GPUInfo] = {}
        self.allocations: Dict[str, GPUResourceAllocation] = {}
        self.usage_history: Dict[int, List[ResourceUsageStats]] = {}
        self.health_checks: Dict[int, GPUHealthCheck] = {}
        self.load_balancing_config = LoadBalancingConfig()
        
        # Multi-GPU scheduling: GPUs are partitioned into priority pools.
        self.gpu_pools: Dict[str, Set[int]] = {
            'high_priority': set(),    # high-priority GPU pool
            'normal_priority': set(),  # normal-priority GPU pool
            'backup': set()           # spare/backup GPU pool
        }
        
        # Dynamic memory management
        self.memory_pools: Dict[int, Dict[str, int]] = {}  # gpu_id -> {allocated, reserved, available}
        self.memory_fragmentation: Dict[int, float] = {}   # per-GPU memory fragmentation ratio
        self.memory_allocation_history: Dict[int, deque] = {}  # per-GPU allocate/release event log
        
        # Real-time performance monitoring
        self.performance_metrics: Dict[int, Dict[str, float]] = {}  # live performance indicators
        self.throughput_stats: Dict[int, deque] = {}  # throughput samples
        self.latency_stats: Dict[int, deque] = {}     # latency samples
        
        # Adaptive alerting thresholds (ratios for usage/utilization, °C for temperature)
        self.adaptive_thresholds: Dict[str, float] = {
            'memory_usage_warning': 0.8,
            'memory_usage_critical': 0.9,
            'utilization_warning': 0.85,
            'utilization_critical': 0.95,
            'temperature_warning': 75.0,
            'temperature_critical': 85.0
        }
        
        # Load-balancing bookkeeping
        self.load_balancer_stats: Dict[int, Dict[str, float]] = {}
        self.task_distribution: Dict[int, int] = {}  # tasks currently attributed to each GPU
        
        # Failure recovery tracking
        self.failed_gpus: Set[int] = set()
        self.recovery_attempts: Dict[int, int] = {}
        
        # Thread-safety locks (RLock: allocation paths may re-enter)
        self._allocation_lock = threading.RLock()
        self._stats_lock = threading.RLock()
        
        # Initialize the NVIDIA management library (sets self.nvidia_initialized).
        self._init_nvidia_ml()
        
        # Background tasks; created in initialize() once an event loop exists.
        self._monitoring_task = None
        self._cleanup_task = None
        self._performance_task = None
    
    def _init_nvidia_ml(self):
        """Initialize NVML; record the outcome in ``self.nvidia_initialized``."""
        self.nvidia_initialized = False
        if not NVIDIA_AVAILABLE:
            # The optional NVIDIA libraries never imported; nothing to set up.
            return
        try:
            pynvml.nvmlInit()
        except Exception as e:
            logger.error(f"Failed to initialize NVIDIA ML: {e}")
        else:
            self.nvidia_initialized = True
            logger.info("NVIDIA ML library initialized successfully")
    
    async def initialize(self):
        """Bring the manager online: detect hardware, build pools, start monitors.

        Raises whatever the underlying setup step raised, after logging it.
        """
        try:
            # Hardware discovery must come first; everything below keys off self.gpus.
            await self.detect_gpus()

            # Derive scheduling pools, memory accounting and perf counters.
            await self._initialize_gpu_pools()
            await self._initialize_memory_pools()
            await self._initialize_performance_monitoring()

            # Launch the background maintenance coroutines.
            self._monitoring_task = asyncio.create_task(self._monitoring_loop())
            self._cleanup_task = asyncio.create_task(self._cleanup_loop())
            self._performance_task = asyncio.create_task(self._performance_monitoring_loop())

            logger.info(f"Enhanced GPU Resource Manager initialized with {len(self.gpus)} GPUs")
            logger.info(f"GPU Pools: High Priority: {len(self.gpu_pools['high_priority'])}, "
                       f"Normal: {len(self.gpu_pools['normal_priority'])}, "
                       f"Backup: {len(self.gpu_pools['backup'])}")

        except Exception as e:
            logger.error(f"Failed to initialize Enhanced GPU Resource Manager: {e}")
            raise
    
    async def detect_gpus(self):
        """Detect GPU devices via NVML, falling back to nvidia-smi parsing.

        Rebuilds ``self.gpus`` from scratch and resets each detected GPU's
        usage history.
        """
        self.gpus.clear()
        
        if not self.nvidia_initialized:
            logger.warning("NVIDIA libraries not available, using fallback detection")
            await self._detect_gpus_fallback()
            return
        
        def _as_str(value):
            # FIX: older pynvml returns bytes from string queries, newer
            # versions already return str -- blindly calling .decode() broke
            # on recent pynvml releases.
            return value.decode('utf-8') if isinstance(value, bytes) else value
        
        try:
            device_count = pynvml.nvmlDeviceGetCount()
            
            for i in range(device_count):
                handle = pynvml.nvmlDeviceGetHandleByIndex(i)
                
                # Basic device identity and memory figures.
                name = _as_str(pynvml.nvmlDeviceGetName(handle))
                memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                
                # Driver / CUDA versions are best-effort; keep "Unknown" on failure
                # (narrowed from bare `except:` to `except Exception`).
                try:
                    driver_version = _as_str(pynvml.nvmlSystemGetDriverVersion())
                except Exception:
                    driver_version = "Unknown"
                
                try:
                    cuda_version = pynvml.nvmlSystemGetCudaDriverVersion()
                    # NVML encodes e.g. CUDA 12.2 as 12020.
                    cuda_version = f"{cuda_version // 1000}.{(cuda_version % 1000) // 10}"
                except Exception:
                    cuda_version = "Unknown"
                
                gpu_info = GPUInfo(
                    gpu_id=i,
                    name=name,
                    memory_total=memory_info.total // (1024 * 1024),  # bytes -> MB
                    memory_used=memory_info.used // (1024 * 1024),
                    memory_free=memory_info.free // (1024 * 1024),
                    utilization=0.0,  # refreshed later by the monitoring loop
                    status=GPUStatus.AVAILABLE,
                    driver_version=driver_version,
                    cuda_version=cuda_version
                )
                
                self.gpus[i] = gpu_info
                self.usage_history[i] = []
                
                logger.info(f"Detected GPU {i}: {name} ({gpu_info.memory_total}MB)")
                
        except Exception as e:
            logger.error(f"Failed to detect NVIDIA GPUs: {e}")
            await self._detect_gpus_fallback()
    
    async def _detect_gpus_fallback(self):
        """Fallback GPU detection by parsing ``nvidia-smi`` CSV output.

        Populates ``self.gpus`` on success; on any failure it logs and leaves
        the GPU table untouched (best-effort semantics).
        """
        try:
            result = subprocess.run(
                ['nvidia-smi', '--query-gpu=index,name,memory.total,driver_version', '--format=csv,noheader,nounits'],
                capture_output=True,
                text=True,
                timeout=10
            )
        except FileNotFoundError:
            # FIX: a missing nvidia-smi binary is an expected condition on
            # CPU-only hosts -- warn instead of logging a generic error.
            logger.warning("nvidia-smi not found; no NVIDIA GPUs detected")
            return
        except subprocess.TimeoutExpired:
            logger.error("nvidia-smi timed out during fallback GPU detection")
            return
        except Exception as e:
            logger.error(f"Fallback GPU detection failed: {e}")
            return
        
        if result.returncode != 0:
            logger.warning("No NVIDIA GPUs detected")
            return
        
        try:
            for line in result.stdout.strip().split('\n'):
                # FIX: split on ',' and strip each field rather than relying
                # on an exact ', ' separator in nvidia-smi's output.
                parts = [p.strip() for p in line.split(',')]
                if len(parts) >= 4:
                    gpu_id = int(parts[0])
                    name = parts[1]
                    memory_total = int(parts[2])
                    driver_version = parts[3]
                    
                    gpu_info = GPUInfo(
                        gpu_id=gpu_id,
                        name=name,
                        memory_total=memory_total,
                        memory_used=0,
                        memory_free=memory_total,
                        utilization=0.0,
                        status=GPUStatus.AVAILABLE,
                        driver_version=driver_version,
                        cuda_version="Unknown"
                    )
                    
                    self.gpus[gpu_id] = gpu_info
                    self.usage_history[gpu_id] = []
                    
                    logger.info(f"Detected GPU {gpu_id} via nvidia-smi: {name}")
        except Exception as e:
            logger.error(f"Fallback GPU detection failed: {e}")
    
    async def _initialize_gpu_pools(self):
        """Partition detected GPUs into high-priority / normal / backup pools."""
        if not self.gpus:
            return
        
        gpu_ids = list(self.gpus.keys())
        count = len(gpu_ids)
        
        if count == 1:
            # A single GPU serves the high-priority pool alone.
            self.gpu_pools['high_priority'].add(gpu_ids[0])
        elif count == 2:
            # With two GPUs, dedicate one to each of the two main pools.
            self.gpu_pools['high_priority'].add(gpu_ids[0])
            self.gpu_pools['normal_priority'].add(gpu_ids[1])
        else:
            # Proportional split: at least one high-priority and one backup GPU.
            high_count = max(1, count // 3)
            backup_count = max(1, count // 4)
            
            # Rank GPUs by total memory, largest first; the best go high-priority.
            ranked = sorted(gpu_ids, key=lambda g: self.gpus[g].memory_total, reverse=True)
            self.gpu_pools['high_priority'].update(ranked[:high_count])
            
            # The smallest GPUs become backups, unless already claimed above.
            for candidate in ranked[-backup_count:]:
                if candidate not in self.gpu_pools['high_priority']:
                    self.gpu_pools['backup'].add(candidate)
            
            # Everything unclaimed lands in the normal-priority pool.
            claimed = self.gpu_pools['high_priority'] | self.gpu_pools['backup']
            self.gpu_pools['normal_priority'].update(g for g in gpu_ids if g not in claimed)
        
        logger.info(f"GPU pools initialized: High={self.gpu_pools['high_priority']}, "
                   f"Normal={self.gpu_pools['normal_priority']}, Backup={self.gpu_pools['backup']}")
    
    async def _initialize_memory_pools(self):
        """Create per-GPU memory accounting pools (10% of VRAM held in reserve)."""
        for gpu_id, gpu_info in self.gpus.items():
            capacity = gpu_info.memory_total
            
            self.memory_pools[gpu_id] = {
                'total': capacity,
                'allocated': 0,
                'reserved': int(capacity * 0.1),    # keep 10% of VRAM in reserve
                'available': int(capacity * 0.9),
                'fragmentation_threshold': int(capacity * 0.05)  # 5% fragmentation alarm level
            }
            
            # Bounded log of allocate/release events for fragmentation analysis.
            self.memory_allocation_history[gpu_id] = deque(maxlen=1000)
            self.memory_fragmentation[gpu_id] = 0.0
            
        logger.info(f"Memory pools initialized for {len(self.memory_pools)} GPUs")
    
    async def _initialize_performance_monitoring(self):
        """Reset per-GPU performance counters, sample windows and balancer stats."""
        metric_names = (
            'avg_utilization', 'peak_utilization',
            'avg_memory_usage', 'peak_memory_usage',
            'avg_temperature', 'peak_temperature',
            'tasks_per_second', 'avg_task_latency',
        )
        
        for gpu_id in self.gpus:
            # All metrics start at zero and are updated by the monitoring loops.
            self.performance_metrics[gpu_id] = {name: 0.0 for name in metric_names}
            
            # Rolling windows of roughly the last 5 minutes of samples.
            self.throughput_stats[gpu_id] = deque(maxlen=300)
            self.latency_stats[gpu_id] = deque(maxlen=300)
            
            self.load_balancer_stats[gpu_id] = {
                'total_tasks': 0,
                'successful_tasks': 0,
                'failed_tasks': 0,
                'avg_response_time': 0.0,
                'last_task_time': None
            }
            
            self.task_distribution[gpu_id] = 0
        
        logger.info(f"Performance monitoring initialized for {len(self.performance_metrics)} GPUs")
    
    async def get_gpu_info(self, gpu_id: Optional[int] = None) -> Dict[int, GPUInfo]:
        """Return a snapshot of one GPU's info, or a copy of the whole table.

        An unknown ``gpu_id`` yields an empty dict rather than raising.
        """
        if gpu_id is None:
            return self.gpus.copy()
        try:
            return {gpu_id: self.gpus[gpu_id]}
        except KeyError:
            return {}
    
    async def get_cluster_info(self) -> GPUClusterInfo:
        """Aggregate per-GPU state into a cluster-wide summary snapshot."""
        gpus = list(self.gpus.values())
        total_gpus = len(gpus)
        
        def count_with_status(status):
            # Tally GPUs currently in the given lifecycle state.
            return sum(1 for gpu in gpus if gpu.status == status)
        
        total_memory = sum(gpu.memory_total for gpu in gpus)
        used_memory = sum(gpu.memory_used for gpu in gpus)
        
        # max(..., 1) guards the empty-cluster case against division by zero.
        average_utilization = sum(gpu.utilization for gpu in gpus) / max(total_gpus, 1)
        
        # Task counts remain placeholders until wired to the task scheduler.
        return GPUClusterInfo(
            total_gpus=total_gpus,
            available_gpus=count_with_status(GPUStatus.AVAILABLE),
            busy_gpus=count_with_status(GPUStatus.BUSY),
            error_gpus=count_with_status(GPUStatus.ERROR),
            total_memory=total_memory,
            used_memory=used_memory,
            free_memory=total_memory - used_memory,
            average_utilization=average_utilization,
            pending_tasks=0,  # TODO: fetch from the task scheduler
            running_tasks=0   # TODO: fetch from the task scheduler
        )
    
    async def allocate_resource(self, request: GPUResourceRequest) -> Optional[GPUResourceAllocation]:
        """Allocate GPU memory for a request, with pool-aware GPU selection.

        Returns the allocation record, or ``None`` when no GPU can satisfy
        the request or the dynamic memory allocation fails.
        """
        # BUG FIX: the previous code did `async with asyncio.Lock():`, which
        # creates a brand-new lock on every call and therefore never serialized
        # anything. Use the shared reentrant lock created in __init__ instead.
        # NOTE(review): the lock is held across awaits on internal helpers; it
        # is an RLock, so re-acquisition by those helpers on the same thread is
        # safe, but cross-task interleaving inside one event loop still relies
        # on those helpers not yielding control mid-update -- confirm.
        with self._allocation_lock:
            try:
                # Pick the best GPU for this request (pool/priority aware).
                suitable_gpu = await self._find_suitable_gpu_enhanced(request)
                if suitable_gpu is None:
                    logger.warning("No suitable GPU found for resource request")
                    return None
                
                gpu_id = suitable_gpu.gpu_id
                
                # Reserve memory out of the GPU's dynamic memory pool.
                allocated_memory = await self._allocate_dynamic_memory(gpu_id, request.memory_required)
                if allocated_memory is None:
                    logger.warning(f"Failed to allocate memory on GPU {gpu_id}")
                    return None
                
                # Build the allocation record (with optional expiry).
                allocation_id = str(uuid4())
                allocation = GPUResourceAllocation(
                    allocation_id=allocation_id,
                    gpu_id=gpu_id,
                    memory_allocated=allocated_memory,
                    allocated_at=datetime.now(),
                    expires_at=datetime.now() + timedelta(seconds=request.max_duration) if request.max_duration else None,
                    exclusive=request.exclusive
                )
                
                # Update the GPU's own memory counters...
                self.gpus[gpu_id].memory_used += allocated_memory
                self.gpus[gpu_id].memory_free -= allocated_memory
                
                # ...and the dynamic memory-pool accounting.
                self.memory_pools[gpu_id]['allocated'] += allocated_memory
                self.memory_pools[gpu_id]['available'] -= allocated_memory
                
                # Record the event for fragmentation / usage analysis.
                self.memory_allocation_history[gpu_id].append({
                    'timestamp': datetime.now(),
                    'allocation_id': allocation_id,
                    'memory': allocated_memory,
                    'action': 'allocate'
                })
                
                self.task_distribution[gpu_id] += 1
                
                # Mark the GPU busy when exclusive, or when under 1GB remains free.
                if request.exclusive or self.gpus[gpu_id].memory_free < 1024:
                    self.gpus[gpu_id].status = GPUStatus.BUSY
                
                self.allocations[allocation_id] = allocation
                
                logger.info(f"Allocated {allocated_memory}MB on GPU {gpu_id} (pool: {self._get_gpu_pool(gpu_id)}), "
                           f"allocation ID: {allocation_id}")
                return allocation
                
            except Exception as e:
                logger.error(f"Failed to allocate GPU resource: {e}")
                return None
    
    async def allocate_multi_gpu_resource(self, request: GPUResourceRequest, gpu_count: int = 2) -> Optional[List[GPUResourceAllocation]]:
        """Split one request across ``gpu_count`` GPUs for parallel processing.

        All-or-nothing: if any per-GPU allocation fails, everything already
        allocated is rolled back and ``None`` is returned.
        """
        if gpu_count < 1:
            # Guard against a zero/negative count (would divide by zero below).
            logger.warning(f"Invalid gpu_count {gpu_count} for multi-GPU allocation")
            return None
        
        # BUG FIX: `async with asyncio.Lock():` made a fresh lock per call and
        # serialized nothing; use the shared reentrant allocation lock instead.
        # Reentrancy matters because allocate_resource() acquires it again.
        with self._allocation_lock:
            try:
                allocations = []
                # BUG FIX: ceil-divide so the per-GPU shares sum to at least
                # the requested total (floor division silently under-allocated
                # when memory_required wasn't divisible by gpu_count).
                memory_per_gpu = -(-request.memory_required // gpu_count)
                
                # Find enough GPUs able to host a share of the request.
                suitable_gpus = await self._find_multiple_suitable_gpus(request, gpu_count)
                if len(suitable_gpus) < gpu_count:
                    logger.warning(f"Only found {len(suitable_gpus)} suitable GPUs, required {gpu_count}")
                    return None
                
                for gpu in suitable_gpus[:gpu_count]:
                    gpu_request = GPUResourceRequest(
                        memory_required=memory_per_gpu,
                        max_duration=request.max_duration,
                        exclusive=False,  # multi-GPU shares are never exclusive
                        priority=request.priority,
                        preferred_gpu_ids=[gpu.gpu_id]
                    )
                    
                    allocation = await self.allocate_resource(gpu_request)
                    if allocation:
                        allocations.append(allocation)
                    else:
                        # Roll back every share already granted on first failure.
                        for alloc in allocations:
                            await self.release_resource(alloc.allocation_id)
                        return None
                
                logger.info(f"Multi-GPU allocation successful: {len(allocations)} GPUs allocated")
                return allocations
                
            except Exception as e:
                logger.error(f"Failed to allocate multi-GPU resource: {e}")
                return None
    
    async def release_resource(self, allocation_id: str) -> bool:
        """Release a previous allocation and restore GPU/pool accounting.

        Returns ``True`` when the allocation existed and was released.
        """
        # BUG FIX: `async with asyncio.Lock():` created a fresh lock on every
        # call and therefore never provided mutual exclusion; use the shared
        # reentrant lock created in __init__ instead.
        with self._allocation_lock:
            try:
                if allocation_id not in self.allocations:
                    logger.warning(f"Allocation {allocation_id} not found")
                    return False
                
                allocation = self.allocations[allocation_id]
                gpu_id = allocation.gpu_id
                
                if gpu_id in self.gpus:
                    # Return the memory to the GPU's own counters...
                    self.gpus[gpu_id].memory_used -= allocation.memory_allocated
                    self.gpus[gpu_id].memory_free += allocation.memory_allocated
                    
                    # ...and to the dynamic memory-pool accounting.
                    if gpu_id in self.memory_pools:
                        self.memory_pools[gpu_id]['allocated'] -= allocation.memory_allocated
                        self.memory_pools[gpu_id]['available'] += allocation.memory_allocated
                    
                    # Record the event for fragmentation / usage analysis.
                    if gpu_id in self.memory_allocation_history:
                        self.memory_allocation_history[gpu_id].append({
                            'timestamp': datetime.now(),
                            'allocation_id': allocation_id,
                            'memory': allocation.memory_allocated,
                            'action': 'deallocate'
                        })
                    
                    if gpu_id in self.task_distribution:
                        self.task_distribution[gpu_id] = max(0, self.task_distribution[gpu_id] - 1)
                    
                    # Flip the GPU back to AVAILABLE once no other allocation holds it.
                    if self.gpus[gpu_id].status == GPUStatus.BUSY:
                        other_allocations = [a for a in self.allocations.values() 
                                           if a.gpu_id == gpu_id and a.allocation_id != allocation_id]
                        if not other_allocations:
                            self.gpus[gpu_id].status = GPUStatus.AVAILABLE
                
                del self.allocations[allocation_id]
                
                logger.info(f"Released allocation {allocation_id} on GPU {gpu_id}")
                return True
                
            except Exception as e:
                logger.error(f"Failed to release GPU resource: {e}")
                return False
    
    async def get_enhanced_cluster_info(self) -> Dict[str, Any]:
        """Return the cluster summary plus scheduler/memory/perf internals.

        FIX: the return annotation used the builtin ``any`` function as a
        type; it is now the intended ``typing.Any``.

        NOTE(review): several nested dicts are live references to internal
        state, not copies -- callers must treat the result as read-only.
        """
        cluster_info = await self.get_cluster_info()
        
        return {
            'basic_info': cluster_info.__dict__,
            'gpu_pools': {
                pool_name: list(gpu_set) 
                for pool_name, gpu_set in self.gpu_pools.items()
            },
            'memory_pools': self.memory_pools,
            'performance_metrics': self.performance_metrics,
            'failed_gpus': list(self.failed_gpus),
            'adaptive_thresholds': self.adaptive_thresholds,
            'task_distribution': self.task_distribution,
            'memory_fragmentation': self.memory_fragmentation
        }
    
    async def get_gpu_performance_report(self, gpu_id: Optional[int] = None, hours: int = 1) -> Dict[str, Any]:
        """Build a performance report keyed ``gpu_<id>`` per GPU.

        FIX: the return annotation used the builtin ``any`` function as a
        type; it is now the intended ``typing.Any``.

        Args:
            gpu_id: restrict the report to one GPU; ``None`` means all GPUs.
                An unknown id yields an empty report rather than raising.
            hours: look-back window forwarded to the usage-history query.
        """
        if gpu_id is not None:
            gpu_ids = [gpu_id] if gpu_id in self.gpus else []
        else:
            gpu_ids = list(self.gpus.keys())
        
        report = {}
        
        for gid in gpu_ids:
            # .get(..., default) everywhere: monitoring structures may not yet
            # have an entry for a freshly detected GPU.
            report[f'gpu_{gid}'] = {
                'gpu_info': self.gpus[gid].__dict__,
                'performance_metrics': self.performance_metrics.get(gid, {}),
                'memory_pool': self.memory_pools.get(gid, {}),
                'usage_history': await self.get_usage_stats(gid, hours),
                'throughput_stats': list(self.throughput_stats.get(gid, [])),
                'latency_stats': list(self.latency_stats.get(gid, [])),
                'load_balancer_stats': self.load_balancer_stats.get(gid, {}),
                'health_check': self.health_checks.get(gid, {}),
                'pool_assignment': self._get_gpu_pool(gid),
                'is_failed': gid in self.failed_gpus,
                'recovery_attempts': self.recovery_attempts.get(gid, 0)
            }
        
        return report
    
    async def optimize_gpu_allocation(self) -> List[ResourceOptimizationSuggestion]:
        """Analyze current state and emit prioritized optimization suggestions.

        FIXES: guarded the average-memory computation against an empty GPU
        table (previously raised ZeroDivisionError, which the outer handler
        turned into a bogus "error" suggestion); removed an unused local
        that fetched the enhanced cluster info for nothing.

        Returns suggestions sorted high -> low priority; on analysis failure
        a single low-priority "error" suggestion is returned.
        """
        suggestions = []
        
        try:
            # 1. Pool load rebalancing: compare task load across the three pools.
            high_priority_load = sum(self.task_distribution.get(gpu_id, 0) 
                                   for gpu_id in self.gpu_pools['high_priority'])
            normal_priority_load = sum(self.task_distribution.get(gpu_id, 0) 
                                     for gpu_id in self.gpu_pools['normal_priority'])
            backup_priority_load = sum(self.task_distribution.get(gpu_id, 0) 
                                     for gpu_id in self.gpu_pools['backup'])
            
            total_load = high_priority_load + normal_priority_load + backup_priority_load
            
            if total_load > 0:
                high_load_ratio = high_priority_load / total_load
                normal_load_ratio = normal_priority_load / total_load
                
                # High-priority pool carrying more than 60% of all tasks: urgent.
                if high_load_ratio > 0.6:
                    suggestions.append(ResourceOptimizationSuggestion(
                        suggestion_type="pool_rebalancing_critical",
                        description=f"高优先级GPU池负载过重 ({high_load_ratio:.1%})，建议紧急重新分配",
                        priority="high",
                        potential_improvement="可提升15-25%的任务处理效率",
                        implementation_steps=[
                            "立即将2-3个普通优先级GPU升级为高优先级",
                            "实施动态负载均衡策略",
                            "启用GPU池自动扩缩容",
                            "设置负载监控告警"
                        ]
                    ))
                
                # Pools diverging by more than 40 percentage points: uneven spread.
                elif abs(high_load_ratio - normal_load_ratio) > 0.4:
                    suggestions.append(ResourceOptimizationSuggestion(
                        suggestion_type="pool_rebalancing",
                        description=f"GPU池负载分布不均匀 (高:{high_load_ratio:.1%}, 普通:{normal_load_ratio:.1%})",
                        priority="medium",
                        potential_improvement="可提升10-15%的任务处理效率",
                        implementation_steps=[
                            "分析任务分布模式",
                            "调整GPU池分配比例",
                            "优化任务路由策略",
                            "监控重新分配效果"
                        ]
                    ))
            
            # 2. Memory fragmentation: tiered warnings at >30% and >50%.
            high_fragmentation_gpus = []
            critical_fragmentation_gpus = []
            
            for gpu_id, frag in self.memory_fragmentation.items():
                if frag > 0.5:
                    critical_fragmentation_gpus.append(gpu_id)
                elif frag > 0.3:
                    high_fragmentation_gpus.append(gpu_id)
            
            if critical_fragmentation_gpus:
                suggestions.append(ResourceOptimizationSuggestion(
                    suggestion_type="memory_defragmentation_critical",
                    description=f"GPU {critical_fragmentation_gpus} 内存碎片化严重 (>50%)",
                    priority="high",
                    potential_improvement="可提升20-30%的内存利用率",
                    implementation_steps=[
                        "立即执行内存碎片整理",
                        "暂停非关键任务释放内存",
                        "重新启动GPU内存管理器",
                        "实施预防性内存分配策略"
                    ]
                ))
            
            elif high_fragmentation_gpus:
                suggestions.append(ResourceOptimizationSuggestion(
                    suggestion_type="memory_defragmentation",
                    description=f"GPU {high_fragmentation_gpus} 内存碎片化较高 (30-50%)",
                    priority="medium",
                    potential_improvement="可提升10-15%的内存利用率",
                    implementation_steps=[
                        "执行内存碎片整理",
                        "优化内存分配策略",
                        "调整批处理大小",
                        "定期执行内存维护"
                    ]
                ))
            
            # 3. Utilization imbalance: simultaneous under- (<30%) and over- (>95%) use.
            underutilized_gpus = []
            overutilized_gpus = []
            
            for gpu_id, gpu_info in self.gpus.items():
                utilization = gpu_info.utilization
                
                if utilization < 30:
                    underutilized_gpus.append(gpu_id)
                elif utilization > 95:
                    overutilized_gpus.append(gpu_id)
            
            if underutilized_gpus and overutilized_gpus:
                suggestions.append(ResourceOptimizationSuggestion(
                    suggestion_type="load_balancing_optimization",
                    description=f"GPU负载不均衡：{len(underutilized_gpus)}个GPU利用率过低，{len(overutilized_gpus)}个GPU过载",
                    priority="high",
                    potential_improvement="可提升25-35%的整体GPU利用率",
                    implementation_steps=[
                        "实施智能任务迁移",
                        "调整任务分配算法",
                        "启用动态负载均衡",
                        "优化GPU亲和性策略"
                    ]
                ))
            
            # 4. Batch-size tuning based on average memory usage.
            # Guard: with no GPUs detected this previously divided by zero.
            if self.gpus:
                avg_gpu_memory_usage = sum(gpu.memory_used / gpu.memory_total * 100 
                                         for gpu in self.gpus.values()) / len(self.gpus)
                
                if avg_gpu_memory_usage < 50:
                    suggestions.append(ResourceOptimizationSuggestion(
                        suggestion_type="batch_size_increase",
                        description=f"GPU内存使用率较低 ({avg_gpu_memory_usage:.1f}%)，可增加批处理大小",
                        priority="medium",
                        potential_improvement="可提升20-30%的处理吞吐量",
                        implementation_steps=[
                            "逐步增加批处理大小",
                            "监控内存使用情况",
                            "调整并发任务数量",
                            "优化数据预加载策略"
                        ]
                    ))
                
                elif avg_gpu_memory_usage > 85:
                    suggestions.append(ResourceOptimizationSuggestion(
                        suggestion_type="batch_size_decrease",
                        description=f"GPU内存使用率过高 ({avg_gpu_memory_usage:.1f}%)，建议减少批处理大小",
                        priority="high",
                        potential_improvement="可减少15-20%的内存压力",
                        implementation_steps=[
                            "减少批处理大小",
                            "优化内存分配策略",
                            "清理不必要的缓存",
                            "实施内存监控告警"
                        ]
                    ))
            
            # 5. Failed GPUs with fewer than 3 recovery attempts are recoverable.
            if self.failed_gpus:
                recovery_suggestions = []
                for gpu_id in self.failed_gpus:
                    recovery_attempts = self.recovery_attempts.get(gpu_id, 0)
                    if recovery_attempts < 3:
                        recovery_suggestions.append(f"GPU {gpu_id} (尝试次数: {recovery_attempts})")
                
                if recovery_suggestions:
                    suggestions.append(ResourceOptimizationSuggestion(
                        suggestion_type="gpu_recovery",
                        description=f"检测到故障GPU可尝试恢复: {', '.join(recovery_suggestions)}",
                        priority="medium",
                        potential_improvement="可恢复10-20%的GPU计算能力",
                        implementation_steps=[
                            "执行GPU重置操作",
                            "检查驱动程序状态",
                            "清理GPU内存",
                            "重新初始化GPU资源"
                        ]
                    ))
            
            # 6. Latency: average task latency above 1 second across GPUs.
            if len(self.performance_metrics) > 0:
                avg_task_latency = sum(metrics.get('avg_task_latency', 0) 
                                     for metrics in self.performance_metrics.values()) / len(self.performance_metrics)
                
                if avg_task_latency > 1000:
                    suggestions.append(ResourceOptimizationSuggestion(
                        suggestion_type="latency_optimization",
                        description=f"GPU任务平均延迟过高 ({avg_task_latency:.0f}ms)",
                        priority="high",
                        potential_improvement="可减少30-40%的任务延迟",
                        implementation_steps=[
                            "优化GPU任务调度算法",
                            "减少数据传输开销",
                            "启用GPU流水线处理",
                            "优化模型推理效率"
                        ]
                    ))
            
            # 7. Scale out when GPUs average >20 tasks; scale in when <2 with >2 GPUs.
            total_task_count = sum(self.task_distribution.values())
            gpu_count = len(self.gpus)
            
            if gpu_count > 0:
                avg_tasks_per_gpu = total_task_count / gpu_count
                
                if avg_tasks_per_gpu > 20:
                    suggestions.append(ResourceOptimizationSuggestion(
                        suggestion_type="scale_out_recommendation",
                        description=f"GPU负载过重 (平均每GPU {avg_tasks_per_gpu:.1f}个任务)，建议扩容",
                        priority="medium",
                        potential_improvement="可提升20-30%的处理能力",
                        implementation_steps=[
                            "评估GPU扩容需求",
                            "添加新的GPU资源",
                            "调整GPU池配置",
                            "更新负载均衡策略"
                        ]
                    ))
                
                elif avg_tasks_per_gpu < 2 and gpu_count > 2:
                    suggestions.append(ResourceOptimizationSuggestion(
                        suggestion_type="scale_in_recommendation",
                        description=f"GPU资源利用率低 (平均每GPU {avg_tasks_per_gpu:.1f}个任务)，可考虑缩容",
                        priority="low",
                        potential_improvement="可节省10-20%的资源成本",
                        implementation_steps=[
                            "分析GPU使用模式",
                            "识别可释放的GPU",
                            "调整GPU池大小",
                            "优化资源分配策略"
                        ]
                    ))
            
            # Sort: high priority first.
            suggestions.sort(key=lambda x: {"high": 3, "medium": 2, "low": 1}[x.priority], reverse=True)
            
            return suggestions
            
        except Exception as e:
            logger.error(f"Failed to generate GPU optimization suggestions: {e}")
            return [ResourceOptimizationSuggestion(
                suggestion_type="error",
                description=f"优化分析失败: {str(e)}",
                priority="low",
                potential_improvement="需要修复分析错误",
                implementation_steps=["检查GPU资源管理器状态", "重新运行优化分析"]
            )]
    
    async def rebalance_gpu_pools(self) -> bool:
        """重新平衡GPU池分配"""
        try:
            # 分析当前负载分布
            pool_loads = {}
            for pool_name, gpu_set in self.gpu_pools.items():
                pool_loads[pool_name] = sum(self.task_distribution.get(gpu_id, 0) 
                                          for gpu_id in gpu_set)
            
            # 如果负载不均衡，重新分配
            total_load = sum(pool_loads.values())
            if total_load == 0:
                return True
            
            # 简单的重新平衡逻辑
            target_high_load = total_load * 0.4  # 高优先级承担40%负载
            target_normal_load = total_load * 0.5  # 普通优先级承担50%负载
            target_backup_load = total_load * 0.1  # 备用承担10%负载
            
            current_high_load = pool_loads.get('high_priority', 0)
            
            # 如果高优先级负载过重
            if current_high_load > target_high_load * 1.5:
                # 从普通优先级池中选择一个GPU升级
                normal_gpus = list(self.gpu_pools['normal_priority'])
                if normal_gpus:
                    # 选择负载最轻的GPU
                    best_gpu = min(normal_gpus, key=lambda x: self.task_distribution.get(x, 0))
                    
                    # 移动GPU
                    self.gpu_pools['normal_priority'].remove(best_gpu)
                    self.gpu_pools['high_priority'].add(best_gpu)
                    
                    logger.info(f"Rebalanced: moved GPU {best_gpu} from normal to high priority pool")
            
            return True
            
        except Exception as e:
            logger.error(f"Failed to rebalance GPU pools: {e}")
            return False
    
    async def _find_suitable_gpu_enhanced(self, request: GPUResourceRequest) -> Optional[GPUInfo]:
        """Find one suitable GPU, honouring priority pools and health state.

        Pools are searched in an order derived from the request priority
        (HIGH prefers the high-priority pool, LOW prefers backup). Within
        a pool, candidates are filtered on status, failure list, pooled
        memory availability, exclusivity, preferred-GPU list and
        temperature; the enhanced selection strategy then picks one.

        Returns:
            The chosen GPUInfo, or None when no GPU qualifies.
        """
        # Derive the pool search order from the request priority.
        req_priority = getattr(request, 'priority', None)
        if req_priority == TaskPriority.HIGH:
            search_order = ['high_priority', 'normal_priority', 'backup']
        elif req_priority == TaskPriority.LOW:
            search_order = ['backup', 'normal_priority']
        else:
            search_order = ['normal_priority', 'high_priority', 'backup']

        def qualifies(gid: int) -> bool:
            """True when the GPU passes every eligibility filter."""
            gpu = self.gpus[gid]
            # Must be in a schedulable state.
            if gpu.status not in [GPUStatus.AVAILABLE, GPUStatus.BUSY]:
                return False
            # Skip GPUs currently marked as failed.
            if gid in self.failed_gpus:
                return False
            # Must have enough pooled memory for the request.
            if self.memory_pools[gid]['available'] < request.memory_required:
                return False
            # Exclusive requests cannot share a busy GPU.
            if request.exclusive and gpu.status == GPUStatus.BUSY:
                return False
            # Honour an explicit preferred-GPU list when given.
            if request.preferred_gpu_ids and gpu.gpu_id not in request.preferred_gpu_ids:
                return False
            # Reject GPUs running critically hot.
            if gpu.temperature and gpu.temperature > self.adaptive_thresholds['temperature_critical']:
                return False
            return True

        for pool_name in search_order:
            candidates = [
                self.gpus[gid]
                for gid in self.gpu_pools[pool_name]
                if gid in self.gpus and qualifies(gid)
            ]
            if candidates:
                # The first pool with candidates wins; pick within it.
                chosen = await self._select_gpu_by_enhanced_strategy(candidates, request)
                if chosen:
                    return chosen

        return None
    
    async def _find_multiple_suitable_gpus(self, request: GPUResourceRequest, count: int) -> List[GPUInfo]:
        """Find up to ``count`` GPUs suitable for parallel processing.

        The requested memory is split evenly across the GPUs; each
        candidate must be schedulable (available/busy), not marked
        failed, and have enough pooled memory for its share. Candidates
        are ranked by total memory (desc), then utilization (asc), then
        temperature (asc).

        Args:
            request: The aggregate resource request to split.
            count: Number of GPUs desired.

        Returns:
            The best ``count`` (or fewer) matching GPUInfo objects;
            empty for a non-positive ``count``.
        """
        # Guard against a zero/negative split, which previously raised
        # ZeroDivisionError.
        if count <= 0:
            return []

        memory_per_gpu = request.memory_required // count

        # Collect every GPU that can host one share of the request.
        # (A per-GPU sub-request object used to be built here but was
        # never used; it has been removed as dead code.)
        candidates = [
            gpu for gpu in self.gpus.values()
            if (gpu.status in [GPUStatus.AVAILABLE, GPUStatus.BUSY]
                and gpu.gpu_id not in self.failed_gpus
                and self.memory_pools[gpu.gpu_id]['available'] >= memory_per_gpu)
        ]

        # Rank: largest total memory first, then lowest utilization,
        # then coolest (missing temperature treated as 0).
        candidates.sort(key=lambda g: (
            -g.memory_total,
            g.utilization,
            g.temperature or 0
        ))

        return candidates[:count]
    
    async def _allocate_dynamic_memory(self, gpu_id: int, requested_memory: int) -> Optional[int]:
        """动态内存分配 - 支持内存碎片整理和优化"""
        if gpu_id not in self.memory_pools:
            return None
        
        pool = self.memory_pools[gpu_id]
        
        # 检查可用内存
        if pool['available'] < requested_memory:
            # 尝试内存碎片整理
            await self._defragment_memory(gpu_id)
            if pool['available'] < requested_memory:
                return None
        
        # 计算实际分配内存（考虑对齐和优化）
        aligned_memory = self._align_memory_allocation(requested_memory)
        
        # 检查内存碎片化程度
        fragmentation = await self._calculate_memory_fragmentation(gpu_id)
        if fragmentation > 0.3:  # 碎片化超过30%
            logger.warning(f"High memory fragmentation on GPU {gpu_id}: {fragmentation:.2%}")
            # 可以触发后台碎片整理
            asyncio.create_task(self._defragment_memory(gpu_id))
        
        return aligned_memory
    
    def _align_memory_allocation(self, memory: int) -> int:
        """内存对齐分配 - 提高GPU内存访问效率"""
        # 对齐到256MB边界，提高内存访问效率
        alignment = 256
        return ((memory + alignment - 1) // alignment) * alignment
    
    async def _calculate_memory_fragmentation(self, gpu_id: int) -> float:
        """计算内存碎片化程度"""
        if gpu_id not in self.memory_allocation_history:
            return 0.0
        
        history = list(self.memory_allocation_history[gpu_id])
        if len(history) < 10:  # 历史记录不足
            return 0.0
        
        # 简单的碎片化计算：分配/释放操作的不均匀程度
        allocations = [h for h in history if h['action'] == 'allocate']
        deallocations = [h for h in history if h['action'] == 'deallocate']
        
        if not allocations:
            return 0.0
        
        # 计算分配大小的方差作为碎片化指标
        sizes = [a['memory'] for a in allocations[-50:]]  # 最近50次分配
        if len(sizes) < 2:
            return 0.0
        
        mean_size = sum(sizes) / len(sizes)
        variance = sum((s - mean_size) ** 2 for s in sizes) / len(sizes)
        
        # 归一化碎片化程度
        fragmentation = min(1.0, variance / (mean_size ** 2))
        self.memory_fragmentation[gpu_id] = fragmentation
        
        return fragmentation
    
    async def _defragment_memory(self, gpu_id: int):
        """Background memory defragmentation pass for one GPU.

        Currently a placeholder: it simulates the work with a short
        sleep and then lowers the cached fragmentation score by 0.1
        (floored at 0).

        Args:
            gpu_id: Index of the GPU to defragment.
        """
        try:
            # Real defragmentation (compacting allocations, merging small
            # free blocks, etc.) would go here.
            logger.info(f"Starting memory defragmentation for GPU {gpu_id}")

            # Simulate the defragmentation work.
            await asyncio.sleep(0.1)

            # Lower the fragmentation score. Use .get so a GPU without a
            # recorded score does not raise KeyError and silently abort
            # the pass (the previous direct index could do exactly that).
            current = self.memory_fragmentation.get(gpu_id, 0.0)
            self.memory_fragmentation[gpu_id] = max(0.0, current - 0.1)

            logger.info(f"Memory defragmentation completed for GPU {gpu_id}")

        except Exception as e:
            logger.error(f"Memory defragmentation failed for GPU {gpu_id}: {e}")
    
    def _get_gpu_pool(self, gpu_id: int) -> str:
        """获取GPU所属的池"""
        for pool_name, gpu_set in self.gpu_pools.items():
            if gpu_id in gpu_set:
                return pool_name
        return "unknown"
    
    async def _select_gpu_by_enhanced_strategy(self, gpus: List[GPUInfo], request: GPUResourceRequest) -> GPUInfo:
        """Pick one GPU from ``gpus`` using the configured strategy.

        Trivial cases (empty or singleton candidate list) short-circuit;
        otherwise the configured load-balancing strategy decides, with
        the legacy ``_select_gpu_by_strategy`` as fallback for unknown
        strategy names.
        """
        if not gpus:
            return None
        if len(gpus) == 1:
            return gpus[0]

        strategy = self.load_balancing_config.strategy

        if strategy == "intelligent":
            return await self._intelligent_gpu_selection(gpus, request)
        if strategy == "performance_optimized":
            return await self._performance_optimized_selection(gpus)
        if strategy == "load_balanced":
            return await self._load_balanced_selection(gpus)
        # Legacy strategies (round_robin, least_used, ...) as fallback.
        return await self._select_gpu_by_strategy(gpus)
    
    async def _intelligent_gpu_selection(self, gpus: List[GPUInfo], request: GPUResourceRequest) -> GPUInfo:
        """Intelligent GPU selection weighing performance, load and health.

        Scores each candidate on five weighted factors and returns the
        highest-scoring GPU:
          - pooled memory availability (30%)
          - utilization, preferring ~50% (25%)
          - temperature (20%)
          - task-distribution balance (15%)
          - historical task latency (10%)
        """
        def calculate_intelligent_score(gpu) -> float:
            score = 0.0

            # Memory availability (30%).
            pool = self.memory_pools[gpu.gpu_id]
            score += 0.3 * (pool['available'] / pool['total'])

            # Utilization (25%) - a GPU near 50% utilization is ideal.
            utilization_score = 1.0 - abs(gpu.utilization - 50) / 50
            score += 0.25 * max(0, utilization_score)

            # Temperature (20%); 50C assumed when unknown.
            temp_score = 1.0 - (gpu.temperature or 50) / 100
            score += 0.2 * max(0, temp_score)

            # Task-distribution balance (15%). Guard the empty case,
            # which previously raised ZeroDivisionError, and use .get so
            # an untracked GPU does not raise KeyError.
            task_count = len(self.task_distribution)
            avg_tasks = (sum(self.task_distribution.values()) / task_count) if task_count else 0.0
            own_tasks = self.task_distribution.get(gpu.gpu_id, 0)
            balance_score = 1.0 - abs(own_tasks - avg_tasks) / max(avg_tasks, 1)
            score += 0.15 * max(0, balance_score)

            # Historical performance (10%); default latency 1s, 5s is
            # treated as the worst case.
            perf_metrics = self.performance_metrics.get(gpu.gpu_id, {})
            avg_latency = perf_metrics.get('avg_task_latency', 1000)
            latency_score = 1.0 - min(avg_latency / 5000, 1.0)
            score += 0.1 * latency_score

            return score

        return max(gpus, key=calculate_intelligent_score)
    
    async def _performance_optimized_selection(self, gpus: List[GPUInfo]) -> GPUInfo:
        """Select the GPU with the best raw-performance score.

        Scoring: total memory 40% (24 GB = full marks), idle headroom
        30%, temperature 20%, historical throughput 10% (100 tasks/sec
        = full marks).
        """
        def performance_score(gpu) -> float:
            # Memory capacity (40%), normalized against 24 GB.
            score = 0.4 * (gpu.memory_total / 24000)

            # Idle headroom (30%) - lower current utilization is better.
            score += 0.3 * (1 - gpu.utilization / 100)

            # Temperature (20%); 50C assumed when unknown.
            score += 0.2 * (1 - (gpu.temperature or 50) / 100)

            # Historical throughput (10%), capped at 100 tasks/sec.
            samples = self.throughput_stats.get(gpu.gpu_id, deque())
            if samples:
                mean_rate = sum(samples) / len(samples)
                score += 0.1 * min(mean_rate / 100, 1.0)

            return score

        return max(gpus, key=performance_score)
    
    async def _load_balanced_selection(self, gpus: List[GPUInfo]) -> GPUInfo:
        """Select the least-loaded GPU.

        Load is a weighted mix of memory usage (40%), utilization (40%)
        and this GPU's share of all distributed tasks (20%); the GPU
        with the lowest combined load wins.
        """
        # Hoisted loop invariant: total tracked tasks (floored at 1).
        total_tasks = max(sum(self.task_distribution.values()), 1)

        def combined_load(gpu) -> float:
            memory_load = gpu.memory_used / gpu.memory_total
            utilization_load = gpu.utilization / 100
            task_load = self.task_distribution[gpu.gpu_id] / total_tasks
            return 0.4 * memory_load + 0.4 * utilization_load + 0.2 * task_load

        # Minimizing load is equivalent to the original's maximizing of
        # (1 - load), including tie-breaking on the first candidate.
        return min(gpus, key=combined_load)
    
    async def _select_gpu_by_strategy(self, gpus: List[GPUInfo]) -> GPUInfo:
        """Legacy GPU selection strategies (kept for compatibility).

        Supports round_robin (lowest id), least_used (lowest
        utilization), most_memory (most free memory) and
        weighted_round_robin (weighted memory/utilization/temperature
        score); any other strategy name returns the first candidate.
        """
        strategy = self.load_balancing_config.strategy

        if strategy == "round_robin":
            return min(gpus, key=lambda g: g.gpu_id)
        if strategy == "least_used":
            return min(gpus, key=lambda g: g.utilization)
        if strategy == "most_memory":
            return max(gpus, key=lambda g: g.memory_free)
        if strategy == "weighted_round_robin":
            weights = self.load_balancing_config.weight_factors

            def weighted_score(gpu) -> float:
                return (
                    weights.get("memory_usage", 0.3) * (1 - gpu.memory_used / gpu.memory_total)
                    + weights.get("utilization", 0.3) * (1 - gpu.utilization / 100)
                    + weights.get("temperature", 0.2) * (1 - (gpu.temperature or 50) / 100)
                )

            return max(gpus, key=weighted_score)

        # Unknown strategy: fall back to the first candidate.
        return gpus[0]
    
    async def get_usage_stats(self, gpu_id: int, hours: int = 24) -> List[ResourceUsageStats]:
        """Return the recorded usage stats for one GPU within a window.

        Args:
            gpu_id: GPU index to query.
            hours: Size of the look-back window (default 24 hours).

        Returns:
            Stats no older than the window; empty when the GPU has no
            recorded history.
        """
        history = self.usage_history.get(gpu_id)
        if history is None:
            return []
        cutoff = datetime.now() - timedelta(hours=hours)
        return [entry for entry in history if entry.timestamp >= cutoff]
    
    async def perform_health_check(self, gpu_id: Optional[int] = None) -> Dict[int, GPUHealthCheck]:
        """Run a health check on one GPU or on every known GPU.

        Evaluates temperature, memory pressure and utilization, derives
        a 0-100 health score (-20 per issue, -10 per warning, floored at
        0) and caches each result in ``self.health_checks``.

        Args:
            gpu_id: Specific GPU to check, or None for all GPUs.

        Returns:
            Mapping of GPU id to its GPUHealthCheck result.
        """
        results = {}
        targets = list(self.gpus.keys()) if gpu_id is None else [gpu_id]

        for gid in targets:
            gpu = self.gpus.get(gid)
            if gpu is None:
                continue

            issues = []
            warnings = []
            recommendations = []

            # Temperature: >85C is an issue, >75C only a warning.
            if gpu.temperature and gpu.temperature > 85:
                issues.append("GPU温度过高")
                recommendations.append("检查散热系统")
            elif gpu.temperature and gpu.temperature > 75:
                warnings.append("GPU温度偏高")
                recommendations.append("增强散热")

            # Memory pressure above 90% only warns.
            memory_usage_percent = (gpu.memory_used / gpu.memory_total) * 100
            if memory_usage_percent > 90:
                warnings.append("显存使用率过高")
                recommendations.append("优化内存使用")

            # Utilization above 95% only warns.
            if gpu.utilization > 95:
                warnings.append("GPU利用率过高")
                recommendations.append("考虑负载均衡")

            # Score: start at 100, -20 per issue, -10 per warning, floor 0.
            health_score = max(0, 100.0 - len(issues) * 20 - len(warnings) * 10)

            report = GPUHealthCheck(
                gpu_id=gid,
                health_score=health_score,
                issues=issues,
                warnings=warnings,
                recommendations=recommendations,
                last_check=datetime.now()
            )
            results[gid] = report
            self.health_checks[gid] = report

        return results
    
    async def get_optimization_suggestions(self) -> List[ResourceOptimizationSuggestion]:
        """Produce resource-optimization suggestions for the cluster.

        Inspects cluster-wide memory pressure (>80%), per-GPU load
        imbalance (>30 percentage-point utilization spread) and cached
        health-check scores (<70), emitting one suggestion per finding.
        """
        suggestions = []

        # Cluster-wide view for the memory check.
        cluster_info = await self.get_cluster_info()

        # Cluster memory pressure above 80%.
        if cluster_info.used_memory / cluster_info.total_memory > 0.8:
            suggestions.append(ResourceOptimizationSuggestion(
                suggestion_type="memory_optimization",
                description="集群显存使用率过高，建议优化内存分配策略",
                priority="high",
                potential_improvement="可提升15-25%的任务处理能力",
                implementation_steps=[
                    "分析当前内存使用模式",
                    "实施内存池优化",
                    "调整任务调度策略",
                    "考虑增加GPU资源"
                ]
            ))

        # Utilization spread above 30 points means load imbalance.
        utilizations = [gpu.utilization for gpu in self.gpus.values()]
        if len(utilizations) > 1 and max(utilizations) - min(utilizations) > 30:
            suggestions.append(ResourceOptimizationSuggestion(
                suggestion_type="load_balancing",
                description="GPU负载不均衡，建议优化任务分配策略",
                priority="medium",
                potential_improvement="可提升10-20%的整体效率",
                implementation_steps=[
                    "分析任务分配模式",
                    "调整负载均衡算法",
                    "实施动态任务迁移",
                    "优化任务优先级策略"
                ]
            ))

        # Cached health reports scoring below 70 need maintenance.
        for gid, report in self.health_checks.items():
            if report.health_score < 70:
                suggestions.append(ResourceOptimizationSuggestion(
                    suggestion_type="health_optimization",
                    description=f"GPU {gid}健康状态不佳，需要维护",
                    priority="high" if report.health_score < 50 else "medium",
                    potential_improvement="可避免硬件故障和性能下降",
                    implementation_steps=report.recommendations
                ))

        return suggestions
    
    async def _monitoring_loop(self):
        """Enhanced monitoring loop (runs forever).

        Every 30 seconds: refresh GPU stats, record usage history, adapt
        thresholds and check GPU health; after an error, back off to 60s.
        """
        while True:
            try:
                # Run the monitoring steps in their fixed order.
                for step in (
                    self._update_gpu_stats,
                    self._record_usage_stats,
                    self._update_adaptive_thresholds,
                    self._check_gpu_health,
                ):
                    await step()
                await asyncio.sleep(30)   # normal cadence
            except Exception as e:
                logger.error(f"Error in monitoring loop: {e}")
                await asyncio.sleep(60)   # back off longer after failure
    
    async def _performance_monitoring_loop(self):
        """Performance monitoring loop (runs forever).

        Collects performance data more frequently than the main monitor:
        every 10 seconds it refreshes metrics, throughput stats and
        anomaly detection; after an error it backs off to 30s.
        """
        while True:
            try:
                # Run the performance-collection steps in order.
                for step in (
                    self._update_performance_metrics,
                    self._update_throughput_stats,
                    self._detect_performance_anomalies,
                ):
                    await step()
                await asyncio.sleep(10)   # performance cadence
            except Exception as e:
                logger.error(f"Error in performance monitoring loop: {e}")
                await asyncio.sleep(30)   # back off after failure
    
    async def _update_gpu_stats(self):
        """更新GPU统计信息"""
        if not self.nvidia_initialized:
            return
        
        try:
            for gpu_id in self.gpus.keys():
                handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
                
                # 更新内存信息
                memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
                self.gpus[gpu_id].memory_used = memory_info.used // (1024 * 1024)
                self.gpus[gpu_id].memory_free = memory_info.free // (1024 * 1024)
                
                # 更新利用率
                try:
                    utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
                    self.gpus[gpu_id].utilization = utilization.gpu
                except:
                    pass
                
                # 更新温度
                try:
                    temperature = pynvml.nvmlDeviceGetTemperature(handle, pynvml.NVML_TEMPERATURE_GPU)
                    self.gpus[gpu_id].temperature = temperature
                except:
                    pass
                
                # 更新功耗
                try:
                    power = pynvml.nvmlDeviceGetPowerUsage(handle) / 1000.0  # 转换为瓦特
                    self.gpus[gpu_id].power_usage = power
                except:
                    pass
                
        except Exception as e:
            logger.error(f"Failed to update GPU stats: {e}")
    
    async def _record_usage_stats(self):
        """记录使用统计"""
        current_time = datetime.now()
        
        for gpu_id, gpu in self.gpus.items():
            stat = ResourceUsageStats(
                gpu_id=gpu_id,
                timestamp=current_time,
                utilization=gpu.utilization,
                memory_used=gpu.memory_used,
                temperature=gpu.temperature,
                power_usage=gpu.power_usage
            )
            
            self.usage_history[gpu_id].append(stat)
            
            # 保持最近24小时的数据
            cutoff_time = current_time - timedelta(hours=24)
            self.usage_history[gpu_id] = [
                s for s in self.usage_history[gpu_id] if s.timestamp >= cutoff_time
            ]
    
    async def _update_adaptive_thresholds(self):
        """更新自适应阈值"""
        try:
            # 基于历史数据动态调整阈值
            for gpu_id, gpu in self.gpus.items():
                if gpu_id not in self.usage_history or len(self.usage_history[gpu_id]) < 10:
                    continue
                
                recent_stats = self.usage_history[gpu_id][-100:]  # 最近100个数据点
                
                # 计算平均值和标准差
                utilizations = [s.utilization for s in recent_stats]
                temperatures = [s.temperature for s in recent_stats if s.temperature]
                
                if utilizations:
                    avg_util = sum(utilizations) / len(utilizations)
                    # 动态调整利用率阈值
                    if avg_util > 80:
                        self.adaptive_thresholds['utilization_warning'] = min(90, avg_util + 10)
                    elif avg_util < 50:
                        self.adaptive_thresholds['utilization_warning'] = max(70, avg_util + 20)
                
                if temperatures:
                    avg_temp = sum(temperatures) / len(temperatures)
                    # 动态调整温度阈值
                    if avg_temp > 70:
                        self.adaptive_thresholds['temperature_warning'] = min(80, avg_temp + 5)
                    elif avg_temp < 60:
                        self.adaptive_thresholds['temperature_warning'] = max(70, avg_temp + 10)
        
        except Exception as e:
            logger.error(f"Failed to update adaptive thresholds: {e}")
    
    async def _check_gpu_health(self):
        """检查GPU健康状态"""
        try:
            for gpu_id, gpu in self.gpus.items():
                # 检查是否需要从故障列表中恢复
                if gpu_id in self.failed_gpus:
                    if await self._attempt_gpu_recovery(gpu_id):
                        self.failed_gpus.remove(gpu_id)
                        logger.info(f"GPU {gpu_id} recovered from failure")
                
                # 检查是否需要标记为故障
                elif await self._is_gpu_failing(gpu_id):
                    self.failed_gpus.add(gpu_id)
                    logger.warning(f"GPU {gpu_id} marked as failing")
        
        except Exception as e:
            logger.error(f"Failed to check GPU health: {e}")
    
    async def _is_gpu_failing(self, gpu_id: int) -> bool:
        """检查GPU是否出现故障"""
        gpu = self.gpus.get(gpu_id)
        if not gpu:
            return True
        
        # 检查温度过高
        if gpu.temperature and gpu.temperature > self.adaptive_thresholds['temperature_critical']:
            return True
        
        # 检查内存错误
        if gpu.memory_used > gpu.memory_total:  # 不应该发生
            return True
        
        # 检查利用率异常
        if gpu.utilization > 100:  # 不应该发生
            return True
        
        return False
    
    async def _attempt_gpu_recovery(self, gpu_id: int) -> bool:
        """尝试恢复故障GPU"""
        try:
            # 增加恢复尝试计数
            self.recovery_attempts[gpu_id] = self.recovery_attempts.get(gpu_id, 0) + 1
            
            # 如果尝试次数过多，放弃恢复
            if self.recovery_attempts[gpu_id] > 5:
                return False
            
            # 检查GPU是否恢复正常
            gpu = self.gpus.get(gpu_id)
            if not gpu:
                return False
            
            # 简单的健康检查
            if (gpu.temperature and gpu.temperature < self.adaptive_thresholds['temperature_warning'] and
                gpu.utilization <= 100 and
                gpu.memory_used <= gpu.memory_total):
                # 重置恢复计数
                self.recovery_attempts[gpu_id] = 0
                return True
            
            return False
        
        except Exception as e:
            logger.error(f"GPU recovery attempt failed for GPU {gpu_id}: {e}")
            return False
    
    async def _update_performance_metrics(self):
        """更新性能指标"""
        try:
            for gpu_id, gpu in self.gpus.items():
                if gpu_id not in self.performance_metrics:
                    continue
                
                metrics = self.performance_metrics[gpu_id]
                
                # 更新平均利用率
                if gpu_id in self.usage_history and self.usage_history[gpu_id]:
                    recent_stats = self.usage_history[gpu_id][-10:]  # 最近10个数据点
                    utilizations = [s.utilization for s in recent_stats]
                    if utilizations:
                        metrics['avg_utilization'] = sum(utilizations) / len(utilizations)
                        metrics['peak_utilization'] = max(utilizations)
                
                # 更新内存使用指标
                memory_usage_percent = (gpu.memory_used / gpu.memory_total) * 100
                metrics['avg_memory_usage'] = memory_usage_percent
                if memory_usage_percent > metrics.get('peak_memory_usage', 0):
                    metrics['peak_memory_usage'] = memory_usage_percent
                
                # 更新温度指标
                if gpu.temperature:
                    metrics['avg_temperature'] = gpu.temperature
                    if gpu.temperature > metrics.get('peak_temperature', 0):
                        metrics['peak_temperature'] = gpu.temperature
        
        except Exception as e:
            logger.error(f"Failed to update performance metrics: {e}")
    
    async def _update_throughput_stats(self):
        """更新吞吐量统计"""
        try:
            current_time = datetime.now()
            
            for gpu_id in self.gpus.keys():
                if gpu_id not in self.load_balancer_stats:
                    continue
                
                stats = self.load_balancer_stats[gpu_id]
                
                # 计算每秒任务数
                if stats['last_task_time']:
                    time_diff = (current_time - stats['last_task_time']).total_seconds()
                    if time_diff > 0:
                        tasks_per_second = stats['total_tasks'] / max(time_diff, 1)
                        self.throughput_stats[gpu_id].append(tasks_per_second)
                        
                        # 更新性能指标
                        if gpu_id in self.performance_metrics:
                            self.performance_metrics[gpu_id]['tasks_per_second'] = tasks_per_second
        
        except Exception as e:
            logger.error(f"Failed to update throughput stats: {e}")
    
    async def _detect_performance_anomalies(self):
        """检测性能异常"""
        try:
            for gpu_id, gpu in self.gpus.items():
                # 检测利用率异常
                if gpu.utilization > self.adaptive_thresholds['utilization_critical']:
                    logger.warning(f"GPU {gpu_id} utilization critical: {gpu.utilization}%")
                
                # 检测内存使用异常
                memory_usage = (gpu.memory_used / gpu.memory_total) * 100
                if memory_usage > self.adaptive_thresholds['memory_usage_critical'] * 100:
                    logger.warning(f"GPU {gpu_id} memory usage critical: {memory_usage:.1f}%")
                
                # 检测温度异常
                if gpu.temperature and gpu.temperature > self.adaptive_thresholds['temperature_critical']:
                    logger.warning(f"GPU {gpu_id} temperature critical: {gpu.temperature}°C")
                
                # 检测吞吐量下降
                if gpu_id in self.throughput_stats and len(self.throughput_stats[gpu_id]) > 5:
                    recent_throughput = list(self.throughput_stats[gpu_id])[-5:]
                    avg_throughput = sum(recent_throughput) / len(recent_throughput)
                    
                    if avg_throughput < 1.0:  # 每秒少于1个任务
                        logger.warning(f"GPU {gpu_id} low throughput: {avg_throughput:.2f} tasks/sec")
        
        except Exception as e:
            logger.error(f"Failed to detect performance anomalies: {e}")
    
    async def _cleanup_loop(self):
        """Cleanup loop (runs forever).

        Reaps expired allocations every 5 minutes; after an unexpected
        error it waits 10 minutes before retrying.
        """
        while True:
            try:
                await self._cleanup_expired_allocations()
                await asyncio.sleep(300)   # normal cadence: 5 minutes
            except Exception as e:
                logger.error(f"Error in cleanup loop: {e}")
                await asyncio.sleep(600)   # error backoff: 10 minutes
    
    async def _cleanup_expired_allocations(self):
        """清理过期的资源分配"""
        current_time = datetime.now()
        expired_allocations = []
        
        for allocation_id, allocation in self.allocations.items():
            if allocation.expires_at and allocation.expires_at <= current_time:
                expired_allocations.append(allocation_id)
        
        for allocation_id in expired_allocations:
            await self.release_resource(allocation_id)
            logger.info(f"Cleaned up expired allocation: {allocation_id}")
    
    async def shutdown(self):
        """Shut down the manager: stop loops, free resources, close NVML.

        Cancels the monitoring/cleanup/performance background tasks,
        releases every outstanding allocation, clears the bookkeeping
        structures and shuts down NVML when it was initialized.
        """
        try:
            # Cancel background tasks and wait for them to unwind.
            tasks_to_cancel = [
                self._monitoring_task,
                self._cleanup_task,
                self._performance_task
            ]
            for task in tasks_to_cancel:
                if task and not task.done():
                    task.cancel()
                    try:
                        await task
                    except asyncio.CancelledError:
                        pass  # expected when awaiting a cancelled task

            # Release every outstanding allocation; iterate over a copy
            # of the keys since release_resource mutates the dict.
            allocation_ids = list(self.allocations.keys())
            for allocation_id in allocation_ids:
                await self.release_resource(allocation_id)

            # Drop memory-pool bookkeeping.
            self.memory_pools.clear()
            self.memory_allocation_history.clear()

            # Drop performance statistics.
            self.performance_metrics.clear()
            self.throughput_stats.clear()
            self.latency_stats.clear()

            # Shut down NVML; narrowed from a bare except so
            # KeyboardInterrupt/SystemExit are not swallowed here.
            if self.nvidia_initialized:
                try:
                    pynvml.nvmlShutdown()
                except Exception:
                    pass

            logger.info("Enhanced GPU Resource Manager shutdown completed")

        except Exception as e:
            logger.error(f"Error during Enhanced GPU Resource Manager shutdown: {e}")


# Backward-compatibility alias for the previous class name.
GPUResourceManager = EnhancedGPUResourceManager

# Global enhanced GPU resource manager instance (module-level singleton).
gpu_resource_manager = EnhancedGPUResourceManager()