#!/usr/bin/env python3
"""性能监控和优化工具"""

import time
import psutil
import gc
import threading
from functools import wraps
from typing import Dict, Any, Optional, Callable
from contextlib import contextmanager
from pathlib import Path

try:
    import torch
    TORCH_AVAILABLE = True
except ImportError:
    TORCH_AVAILABLE = False

from .logger import get_logger

logger = get_logger(__name__)


class PerformanceMonitor:
    """Thread-safe registry of named timers and their accumulated durations.

    Each named metric holds a list of elapsed times (seconds); summary
    statistics (count / total / average / min / max) can be queried at
    any point.
    """

    def __init__(self):
        # name -> list of elapsed durations in seconds
        self.metrics: Dict[str, list] = {}
        # name -> perf_counter() snapshot taken by start_timer()
        self.start_times: Dict[str, float] = {}
        self._lock = threading.Lock()

    def start_timer(self, name: str) -> None:
        """Record a start time for *name*; a pending start is overwritten."""
        with self._lock:
            # perf_counter() is monotonic, so elapsed times are immune to
            # system clock adjustments (time.time() is not).
            self.start_times[name] = time.perf_counter()

    def end_timer(self, name: str) -> float:
        """Stop timer *name* and return the elapsed seconds.

        Returns 0.0 (and logs a warning) if the timer was never started.
        """
        with self._lock:
            if name not in self.start_times:
                logger.warning(f"Timer '{name}' was not started")
                return 0.0

            elapsed = time.perf_counter() - self.start_times.pop(name)
            self.metrics.setdefault(name, []).append(elapsed)
            return elapsed

    def get_average_time(self, name: str) -> float:
        """Return the mean duration for *name*, or 0.0 if no samples."""
        with self._lock:
            samples = self.metrics.get(name)
            if not samples:
                return 0.0
            return sum(samples) / len(samples)

    def get_total_time(self, name: str) -> float:
        """Return the summed duration for *name*, or 0.0 if no samples."""
        with self._lock:
            return sum(self.metrics.get(name, ()))

    def get_call_count(self, name: str) -> int:
        """Return how many samples were recorded for *name*."""
        with self._lock:
            return len(self.metrics.get(name, ()))

    def reset_metrics(self, name: Optional[str] = None) -> None:
        """Drop samples for *name*, or all metrics when name is None."""
        with self._lock:
            # Explicit None check: a metric named "" should not clear everything.
            if name is not None:
                self.metrics.pop(name, None)
                self.start_times.pop(name, None)
            else:
                self.metrics.clear()
                self.start_times.clear()

    def get_summary(self) -> Dict[str, Dict[str, Any]]:
        """Return per-metric stats: count, total/average/min/max time."""
        with self._lock:
            # Snapshot the sample lists so stats are internally consistent.
            snapshot = {name: list(samples) for name, samples in self.metrics.items()}

        summary = {}
        for name, samples in snapshot.items():
            summary[name] = {
                'count': len(samples),
                'total_time': sum(samples),
                'average_time': sum(samples) / len(samples) if samples else 0.0,
                'min_time': min(samples) if samples else 0,
                'max_time': max(samples) if samples else 0
            }
        return summary


class SystemMonitor:
    """Static helpers that report system resource usage via psutil/torch."""

    @staticmethod
    def get_memory_info() -> Dict[str, Any]:
        """Return virtual-memory totals in bytes plus percentage used."""
        memory = psutil.virtual_memory()
        return {
            'total': memory.total,
            'available': memory.available,
            'used': memory.used,
            'percentage': memory.percent,
            'free': memory.free
        }

    @staticmethod
    def get_cpu_info() -> Dict[str, Any]:
        """Return CPU utilization and core counts.

        ``count`` is the physical core count (may be None on platforms where
        psutil cannot determine it); ``count_logical`` includes hyperthreads.
        The usage sample blocks for 1 second.
        """
        return {
            'usage_percent': psutil.cpu_percent(interval=1),
            # Bug fix: the default cpu_count() is logical=True, which made
            # 'count' and 'count_logical' always identical.
            'count': psutil.cpu_count(logical=False),
            'count_logical': psutil.cpu_count(logical=True)
        }

    @staticmethod
    def get_disk_info(path: str = '/') -> Dict[str, Any]:
        """Return disk usage (bytes and percentage) for *path*.

        Returns an empty dict when the path cannot be inspected.
        """
        try:
            disk = psutil.disk_usage(path)
            return {
                'total': disk.total,
                'used': disk.used,
                'free': disk.free,
                'percentage': (disk.used / disk.total) * 100
            }
        except Exception as e:
            logger.error(f"Failed to get disk info for {path}: {e}")
            return {}

    @staticmethod
    def get_gpu_info() -> Dict[str, Any]:
        """Return CUDA device inventory and per-device memory stats.

        Always includes an ``available`` flag; when False a ``message`` or
        ``error`` explains why.
        """
        if not TORCH_AVAILABLE:
            return {'available': False, 'message': 'PyTorch not available'}

        try:
            if not torch.cuda.is_available():
                return {'available': False, 'message': 'CUDA not available'}

            gpu_count = torch.cuda.device_count()
            current_device = torch.cuda.current_device()

            gpu_info = {
                'available': True,
                'count': gpu_count,
                'current_device': current_device,
                'devices': []
            }

            for i in range(gpu_count):
                props = torch.cuda.get_device_properties(i)
                memory_allocated = torch.cuda.memory_allocated(i)
                memory_reserved = torch.cuda.memory_reserved(i)

                device_info = {
                    'id': i,
                    'name': props.name,
                    'total_memory': props.total_memory,
                    'memory_allocated': memory_allocated,
                    'memory_reserved': memory_reserved,
                    # "free" here means not reserved by the caching allocator,
                    # not free at the driver level.
                    'memory_free': props.total_memory - memory_reserved,
                    'compute_capability': f"{props.major}.{props.minor}"
                }
                gpu_info['devices'].append(device_info)

            return gpu_info

        except Exception as e:
            logger.error(f"Failed to get GPU info: {e}")
            return {'available': False, 'error': str(e)}

    @classmethod
    def get_system_summary(cls) -> Dict[str, Any]:
        """Return a combined memory / CPU / disk / GPU snapshot."""
        return {
            'memory': cls.get_memory_info(),
            'cpu': cls.get_cpu_info(),
            'disk': cls.get_disk_info(),
            'gpu': cls.get_gpu_info()
        }


class MemoryManager:
    """Helpers for inspecting and reclaiming process (and CUDA) memory."""

    @staticmethod
    def clear_cache() -> None:
        """Run Python garbage collection and release cached CUDA memory."""
        gc.collect()

        if TORCH_AVAILABLE and torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()

    @staticmethod
    def get_memory_usage() -> Dict[str, int]:
        """Return this process's memory footprint in bytes.

        Keys: ``rss`` (resident set), ``vms`` (virtual size), plus
        ``gpu_allocated`` / ``gpu_reserved`` when CUDA is usable.
        """
        import os

        info = psutil.Process(os.getpid()).memory_info()
        usage = {
            'rss': info.rss,  # physical memory
            'vms': info.vms,  # virtual memory
        }

        if TORCH_AVAILABLE and torch.cuda.is_available():
            usage['gpu_allocated'] = torch.cuda.memory_allocated()
            usage['gpu_reserved'] = torch.cuda.memory_reserved()

        return usage

    @staticmethod
    def check_memory_threshold(threshold_mb: int = 1000) -> bool:
        """Return True when resident memory exceeds *threshold_mb* megabytes."""
        rss_bytes = MemoryManager.get_memory_usage()['rss']
        return rss_bytes / (1024 * 1024) > threshold_mb

    @staticmethod
    @contextmanager
    def memory_limit(max_memory_mb: int):
        """Warn (and clear caches) if RSS grows past *max_memory_mb* in the block.

        NOTE: this is advisory only — it measures growth after the block runs
        and does not enforce a hard limit.
        """
        rss_before = MemoryManager.get_memory_usage()['rss']

        try:
            yield
        finally:
            rss_after = MemoryManager.get_memory_usage()['rss']
            memory_increase = (rss_after - rss_before) / (1024 * 1024)

            if memory_increase > max_memory_mb:
                logger.warning(
                    f"Memory usage increased by {memory_increase:.2f}MB, "
                    f"exceeding limit of {max_memory_mb}MB"
                )
                MemoryManager.clear_cache()


class GPUManager:
    """Utilities for querying and configuring CUDA devices."""

    @staticmethod
    def is_available() -> bool:
        """Return True when PyTorch is importable and CUDA is usable."""
        return TORCH_AVAILABLE and torch.cuda.is_available()

    @staticmethod
    def get_optimal_device() -> str:
        """Return the CUDA device string with the most free memory, else 'cpu'."""
        if not GPUManager.is_available():
            return 'cpu'

        info = SystemMonitor.get_gpu_info()
        if not info.get('available', False):
            return 'cpu'

        devices = info.get('devices', [])
        if not devices:
            return 'cpu'

        # Prefer the device with the largest amount of unreserved memory.
        freest = max(devices, key=lambda d: d['memory_free'])
        return f"cuda:{freest['id']}"

    @staticmethod
    def set_memory_fraction(fraction: float = 0.8) -> None:
        """Cap this process's CUDA memory usage at *fraction* of device memory."""
        if not GPUManager.is_available():
            return

        try:
            torch.cuda.set_per_process_memory_fraction(fraction)
            logger.info(f"Set GPU memory fraction to {fraction}")
        except Exception as e:
            logger.error(f"Failed to set GPU memory fraction: {e}")

    @staticmethod
    def optimize_for_inference() -> None:
        """Tune global torch settings for inference workloads.

        Enables cudnn autotuning and disables gradient tracking process-wide
        (callers that later need training must re-enable gradients).
        """
        if not GPUManager.is_available():
            return

        try:
            torch.backends.cudnn.benchmark = True
            torch.set_grad_enabled(False)
            logger.info("Optimized GPU settings for inference")
        except Exception as e:
            logger.error(f"Failed to optimize GPU settings: {e}")


# 装饰器
def monitor_performance(name: Optional[str] = None):
    """Decorator that times every call to the wrapped function.

    Args:
        name: Metric name; defaults to "<module>.<function name>".

    The per-function PerformanceMonitor is exposed as ``wrapper._monitor``
    so callers can inspect accumulated timings.
    """
    def decorator(func: Callable) -> Callable:
        monitor_name = name or f"{func.__module__}.{func.__name__}"
        # Create the monitor once at decoration time. The previous
        # getattr-then-create pattern ran on every call and could build two
        # monitors (losing samples) if two threads raced on the first call.
        monitor = PerformanceMonitor()

        @wraps(func)
        def wrapper(*args, **kwargs):
            monitor.start_timer(monitor_name)
            try:
                return func(*args, **kwargs)
            finally:
                # Timing is recorded even when func raises.
                elapsed = monitor.end_timer(monitor_name)
                logger.debug(f"{monitor_name} took {elapsed:.4f}s")

        # Preserve the original attribute for external inspection.
        wrapper._monitor = monitor
        return wrapper
    return decorator


def memory_efficient(clear_cache: bool = True):
    """Decorator that optionally clears caches after the wrapped call and
    logs any resident-memory change larger than 10MB."""
    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args, **kwargs):
            rss_before = MemoryManager.get_memory_usage()['rss']

            try:
                return func(*args, **kwargs)
            finally:
                # Cache clearing happens before the final measurement so the
                # logged delta reflects memory actually retained.
                if clear_cache:
                    MemoryManager.clear_cache()

                rss_after = MemoryManager.get_memory_usage()['rss']
                memory_diff = (rss_after - rss_before) / (1024 * 1024)

                # Only noteworthy changes (>10MB either direction) are logged.
                if abs(memory_diff) > 10:
                    logger.debug(
                        f"{func.__name__} memory change: {memory_diff:+.2f}MB"
                    )

        return wrapper
    return decorator


@contextmanager
def performance_context(name: str, monitor: Optional[PerformanceMonitor] = None):
    """Time a code block and log its wall time plus resident-memory delta.

    Yields the PerformanceMonitor in use (a fresh one when none is given).
    """
    if monitor is None:
        monitor = PerformanceMonitor()

    monitor.start_timer(name)
    rss_before = MemoryManager.get_memory_usage()['rss']

    try:
        yield monitor
    finally:
        # Logged even when the block raises.
        elapsed = monitor.end_timer(name)
        rss_after = MemoryManager.get_memory_usage()['rss']
        memory_diff = (rss_after - rss_before) / (1024 * 1024)

        logger.info(
            f"{name} completed in {elapsed:.4f}s, "
            f"memory change: {memory_diff:+.2f}MB"
        )


# Module-level shared PerformanceMonitor instance.
_global_monitor = PerformanceMonitor()


def get_global_monitor() -> PerformanceMonitor:
    """Return the process-wide shared PerformanceMonitor."""
    return _global_monitor


def log_system_info() -> None:
    """Log a human-readable snapshot of CPU, memory and GPU status."""
    summary = SystemMonitor.get_system_summary()

    logger.info("=== System Information ===")

    # CPU
    cpu = summary['cpu']
    logger.info(f"CPU: {cpu['count']} cores, {cpu['usage_percent']:.1f}% usage")

    # Memory (bytes -> GiB)
    mem = summary['memory']
    total_gb = mem['total'] / (1024**3)
    used_gb = mem['used'] / (1024**3)
    logger.info(
        f"Memory: {used_gb:.1f}GB / {total_gb:.1f}GB "
        f"({mem['percentage']:.1f}% used)"
    )

    # GPU
    gpu = summary['gpu']
    if not gpu['available']:
        logger.info("GPU: Not available")
    else:
        logger.info(f"GPU: {gpu['count']} device(s) available")
        for dev in gpu['devices']:
            dev_total_gb = dev['total_memory'] / (1024**3)
            dev_used_gb = dev['memory_allocated'] / (1024**3)
            logger.info(
                f"  - {dev['name']}: {dev_used_gb:.1f}GB / {dev_total_gb:.1f}GB used"
            )

    logger.info("=" * 30)