"""
缓存监控和管理工具

提供缓存性能监控、统计分析和管理功能
"""

import time
import threading
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional
import logging
from dataclasses import dataclass
import json

from .data_cache import get_cache_manager, CacheLevel

logger = logging.getLogger(__name__)


@dataclass
class CachePerformanceMetrics:
    """A single point-in-time snapshot of cache performance statistics."""
    timestamp: datetime            # when the snapshot was taken
    hit_rate: float                # cache hit rate, expressed as a percentage
    total_requests: int            # hits + misses since stats began
    cache_size: int                # number of entries currently cached
    memory_usage: int              # total size of cached entries, in bytes
    avg_response_time: float       # average lookup latency in seconds (currently a placeholder)
    top_accessed_keys: List[str]   # most frequently accessed keys, truncated for display


class CacheMonitor:
    """Background cache-performance monitor.

    A daemon thread samples the global cache manager's statistics every
    ``monitor_interval`` seconds, keeping a rolling 24-hour history of
    :class:`CachePerformanceMetrics` snapshots. The history feeds the
    analysis, export, and reporting helpers below.
    """

    def __init__(self, monitor_interval: int = 60):
        self.monitor_interval = monitor_interval  # sampling interval in seconds
        self.cache_manager = get_cache_manager()
        self.metrics_history: List["CachePerformanceMetrics"] = []
        self.is_monitoring = False
        self.monitor_thread: Optional[threading.Thread] = None
        self._lock = threading.Lock()  # guards metrics_history
        # Set by stop_monitoring() so the worker wakes immediately instead
        # of sleeping out a full interval before noticing shutdown.
        self._stop_event = threading.Event()

    def start_monitoring(self):
        """Start the background sampling thread (no-op if already running)."""
        if self.is_monitoring:
            logger.warning("缓存监控已在运行")
            return

        self.is_monitoring = True
        self._stop_event.clear()
        self.monitor_thread = threading.Thread(target=self._monitor_worker, daemon=True)
        self.monitor_thread.start()
        logger.info(f"缓存监控已启动，监控间隔: {self.monitor_interval}秒")

    def stop_monitoring(self):
        """Stop the sampling thread and wait briefly for it to exit."""
        self.is_monitoring = False
        # Wake the worker out of its wait so shutdown is immediate rather
        # than delayed by up to monitor_interval seconds.
        self._stop_event.set()
        if self.monitor_thread:
            self.monitor_thread.join(timeout=5)
        logger.info("缓存监控已停止")

    def _monitor_worker(self):
        """Worker loop: sample metrics, prune old history, wait for next tick."""
        while self.is_monitoring:
            try:
                metrics = self._collect_metrics()
                with self._lock:
                    self.metrics_history.append(metrics)
                    # Retain only the last 24 hours of samples.
                    cutoff_time = datetime.now() - timedelta(hours=24)
                    self.metrics_history = [
                        m for m in self.metrics_history
                        if m.timestamp > cutoff_time
                    ]
            except Exception as e:
                logger.error(f"缓存监控异常: {e}")
            # Event.wait() returns early when stop_monitoring() sets the
            # event; otherwise it behaves like a sleep for one interval.
            self._stop_event.wait(self.monitor_interval)

    def _collect_metrics(self) -> "CachePerformanceMetrics":
        """Build a metrics snapshot from the cache manager's stats dict."""
        stats = self.cache_manager.get_stats()

        # Response time is not tracked by the cache manager yet; report a
        # fixed placeholder. TODO: measure real lookup latencies.
        avg_response_time = 0.1

        # Most frequently accessed entries, truncated to 30 chars for display.
        top_entries = sorted(
            stats.get('entries', []),
            key=lambda e: e.get('access_count', 0),
            reverse=True,
        )[:5]
        top_accessed_keys = []
        for entry in top_entries:
            key = entry.get('key', '')  # tolerate entries missing a 'key'
            top_accessed_keys.append(key[:30] + '...' if len(key) > 30 else key)

        return CachePerformanceMetrics(
            timestamp=datetime.now(),
            hit_rate=stats.get('hit_rate', 0),
            total_requests=stats.get('hits', 0) + stats.get('misses', 0),
            cache_size=stats.get('cache_size', 0),
            memory_usage=sum(e.get('size', 0) for e in stats.get('entries', [])),
            avg_response_time=avg_response_time,
            top_accessed_keys=top_accessed_keys,
        )

    def get_current_metrics(self) -> "CachePerformanceMetrics":
        """Return a freshly collected snapshot (not taken from history)."""
        return self._collect_metrics()

    def get_metrics_history(self, hours: int = 1) -> List["CachePerformanceMetrics"]:
        """Return history entries newer than ``hours`` hours ago."""
        cutoff_time = datetime.now() - timedelta(hours=hours)
        with self._lock:
            return [m for m in self.metrics_history if m.timestamp > cutoff_time]

    def analyze_performance(self) -> Dict[str, Any]:
        """Summarize the last hour of samples: hit rate, size, memory, advice.

        Returns a dict with an ``"error"`` key when there is not enough data.
        """
        if not self.metrics_history:
            return {"error": "没有足够的监控数据"}

        recent_metrics = self.get_metrics_history(hours=1)
        if not recent_metrics:
            return {"error": "没有最近1小时的数据"}

        hit_rates = [m.hit_rate for m in recent_metrics]
        cache_sizes = [m.cache_size for m in recent_metrics]
        memory_usage = [m.memory_usage for m in recent_metrics]

        analysis = {
            "time_range": f"最近{len(recent_metrics)}个数据点",
            "hit_rate": {
                "current": hit_rates[-1] if hit_rates else 0,
                "average": sum(hit_rates) / len(hit_rates) if hit_rates else 0,
                # NOTE: a flat series reports "下降"; refine if a "stable"
                # category is ever needed by callers.
                "trend": "上升" if len(hit_rates) > 1 and hit_rates[-1] > hit_rates[0] else "下降"
            },
            "cache_size": {
                "current": cache_sizes[-1] if cache_sizes else 0,
                "max": max(cache_sizes) if cache_sizes else 0,
                "average": sum(cache_sizes) / len(cache_sizes) if cache_sizes else 0
            },
            "memory_usage": {
                "current": memory_usage[-1] if memory_usage else 0,
                "max": max(memory_usage) if memory_usage else 0,
                "average": sum(memory_usage) / len(memory_usage) if memory_usage else 0
            },
            "recommendations": self._generate_recommendations(recent_metrics)
        }

        return analysis

    def _generate_recommendations(self, metrics: List["CachePerformanceMetrics"]) -> List[str]:
        """Derive tuning advice from the latest sample; empty list if no data."""
        recommendations = []

        if not metrics:
            return recommendations

        latest = metrics[-1]

        # Hit-rate advice (hit_rate is a percentage).
        if latest.hit_rate < 50:
            recommendations.append("缓存命中率较低，考虑增加缓存时间或优化缓存策略")
        elif latest.hit_rate > 90:
            recommendations.append("缓存命中率很高，系统性能良好")

        # Entry-count advice.
        if latest.cache_size > 1000:
            recommendations.append("缓存条目较多，考虑定期清理过期缓存")
        elif latest.cache_size < 10:
            recommendations.append("缓存条目较少，可能需要预热缓存")

        # Memory advice: warn above 100 MB.
        if latest.memory_usage > 100 * 1024 * 1024:
            recommendations.append("缓存内存使用较高，考虑优化数据结构或清理大对象")

        return recommendations

    def export_metrics(self, filename: Optional[str] = None) -> str:
        """Dump the full metrics history to a JSON file.

        Args:
            filename: Target path; defaults to a timestamped name.

        Returns:
            The filename written.

        Raises:
            OSError: propagated (after logging) if the file cannot be written.
        """
        if filename is None:
            filename = f"cache_metrics_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"

        export_data = {
            "export_time": datetime.now().isoformat(),
            "metrics_count": len(self.metrics_history),
            "metrics": [
                {
                    "timestamp": m.timestamp.isoformat(),
                    "hit_rate": m.hit_rate,
                    "total_requests": m.total_requests,
                    "cache_size": m.cache_size,
                    "memory_usage": m.memory_usage,
                    "avg_response_time": m.avg_response_time,
                    "top_accessed_keys": m.top_accessed_keys
                }
                for m in self.metrics_history
            ]
        }

        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(export_data, f, ensure_ascii=False, indent=2)

            # Fix: original logged the literal text "(unknown)" instead of
            # interpolating the actual filename.
            logger.info(f"缓存监控数据已导出: {filename}")
            return filename

        except Exception as e:
            logger.error(f"导出监控数据失败: {e}")
            raise

    def print_performance_report(self):
        """Print a human-readable performance report to stdout."""
        current_metrics = self.get_current_metrics()
        analysis = self.analyze_performance()

        print("\n" + "="*60)
        print("缓存性能报告")
        print("="*60)

        print(f"当前指标:")
        print(f"  命中率: {current_metrics.hit_rate:.1f}%")
        print(f"  总请求数: {current_metrics.total_requests}")
        print(f"  缓存大小: {current_metrics.cache_size} 项")
        print(f"  内存使用: {current_metrics.memory_usage / 1024:.1f} KB")

        if current_metrics.top_accessed_keys:
            print(f"\n热门缓存键:")
            for i, key in enumerate(current_metrics.top_accessed_keys[:3], 1):
                print(f"  {i}. {key}")

        # Trend analysis is only available when history exists.
        if "error" not in analysis:
            print(f"\n性能分析:")
            hit_rate_info = analysis["hit_rate"]
            print(f"  平均命中率: {hit_rate_info['average']:.1f}%")
            print(f"  命中率趋势: {hit_rate_info['trend']}")

            cache_info = analysis["cache_size"]
            print(f"  平均缓存大小: {cache_info['average']:.0f} 项")
            print(f"  最大缓存大小: {cache_info['max']} 项")

            if analysis["recommendations"]:
                print(f"\n优化建议:")
                for i, rec in enumerate(analysis["recommendations"], 1):
                    print(f"  {i}. {rec}")

        print("="*60)


# Process-wide singleton monitor, created lazily by get_cache_monitor().
_global_cache_monitor = None
_monitor_lock = threading.Lock()  # guards singleton creation


def get_cache_monitor() -> CacheMonitor:
    """Return the process-wide CacheMonitor, creating it on first use."""
    global _global_cache_monitor

    with _monitor_lock:
        if _global_cache_monitor is None:
            _global_cache_monitor = CacheMonitor()
            logger.info("缓存监控器初始化完成")

    return _global_cache_monitor


def start_cache_monitoring(interval: int = 60):
    """Start background cache monitoring with the given interval in seconds."""
    mon = get_cache_monitor()
    mon.monitor_interval = interval
    mon.start_monitoring()


def stop_cache_monitoring():
    """Stop background cache monitoring."""
    get_cache_monitor().stop_monitoring()


def print_cache_performance():
    """Print a cache performance report to stdout."""
    get_cache_monitor().print_performance_report()
