""" 模型效果跟踪（F1/PPL趋势监控）"""
"""
模型性能追踪器
用于持续监控和记录模型性能指标
"""
import numpy as np
from typing import Dict, Any
import logging
from datetime import datetime, timedelta
import threading

logger = logging.getLogger(__name__)


class PerformanceTracker:
    """Tracks model quality metrics (e.g. F1/PPL trends) and inference latency.

    All mutable state (metric history, latency history, current batch) is
    guarded by a single non-reentrant lock, so individual calls are
    thread-safe.
    """

    def __init__(self, config: Dict[str, Any]):
        """Initialize the performance tracker.

        Args:
            config: Configuration dict. Recognized keys:
                'metrics' (list[str]): metric names to track,
                'max_latency_history' (int): cap on latency records (default 1000),
                'max_metrics_history' (int): per-model cap on batch records (default 100).
        """
        self.config = config
        # model_type -> list of {'timestamp', 'metrics', 'batch_size'} records
        self.metrics_history = {}
        # flat list of {'timestamp', 'latency_ms', 'model_type'} records
        self.latency_history = []
        # most recently recorded batch; empty until the first record
        self.current_batch_metrics = {}
        self.lock = threading.Lock()

        # Metric names to monitor; defaults cover classification + LM scores.
        self.metrics_to_track = config.get('metrics', ['f1_score', 'precision', 'recall', 'ppl'])

    def start_tracking(self) -> None:
        """Announce that tracking has started (logging only; no state change)."""
        logger.info(f"开始性能追踪，监控指标: {self.metrics_to_track}")

    def record_inference_latency(self, start_time: float, end_time: float, model_type: str = 'default') -> float:
        """Record one inference latency sample.

        Args:
            start_time: Start time in seconds (e.g. from time.time()).
            end_time: End time in seconds.
            model_type: Label of the model that served the request.

        Returns:
            The latency in milliseconds.
        """
        latency_ms = (end_time - start_time) * 1000

        with self.lock:
            self.latency_history.append({
                'timestamp': datetime.now(),
                'latency_ms': latency_ms,
                'model_type': model_type
            })

            # Bound memory: keep only the most recent records.
            max_history = self.config.get('max_latency_history', 1000)
            if len(self.latency_history) > max_history:
                self.latency_history = self.latency_history[-max_history:]

        return latency_ms

    def record_batch_metrics(self, metrics: Dict[str, float], batch_size: int, model_type: str = 'default') -> None:
        """Record quality metrics for one evaluation batch.

        Args:
            metrics: Mapping of metric name -> value. Copied defensively so
                later caller-side mutation cannot corrupt the history.
            batch_size: Number of samples in the batch.
            model_type: Label of the model the metrics belong to.
        """
        timestamp = datetime.now()
        # Defensive copy: the stored record must not alias the caller's dict
        # (the original kept a live reference, so callers could corrupt history).
        metrics_snapshot = dict(metrics)

        with self.lock:
            # Latest batch snapshot, exposed via get_current_metrics().
            self.current_batch_metrics = {
                'timestamp': timestamp,
                'metrics': metrics_snapshot,
                'batch_size': batch_size,
                'model_type': model_type
            }

            # Append to this model's history, creating the list on first use.
            self.metrics_history.setdefault(model_type, []).append({
                'timestamp': timestamp,
                'metrics': metrics_snapshot,
                'batch_size': batch_size
            })

            # Bound memory: keep only the most recent records per model.
            max_history = self.config.get('max_metrics_history', 100)
            if len(self.metrics_history[model_type]) > max_history:
                self.metrics_history[model_type] = self.metrics_history[model_type][-max_history:]

        logger.debug(f"记录批次性能指标: {model_type}, {metrics}")

    def get_current_metrics(self) -> Dict[str, Any]:
        """Return a shallow copy of the most recently recorded batch record.

        Returns:
            The latest batch record, or {} if nothing has been recorded yet.
        """
        with self.lock:
            return self.current_batch_metrics.copy()

    def get_average_metrics(self, model_type: str = 'default', window_size: int = 10) -> Dict[str, float]:
        """Average each metric over the most recent batches of a model.

        Args:
            model_type: Model label to aggregate.
            window_size: Number of most recent batches to average over.

        Returns:
            Mapping of metric name -> mean value; {} if no history exists.
            Each metric is averaged only over the batches that reported it.
        """
        with self.lock:
            history = self.metrics_history.get(model_type)
            if not history:
                return {}

            recent_batches = history[-window_size:]

            # Union of all metric names seen in the window.
            metric_keys = set()
            for batch in recent_batches:
                metric_keys.update(batch['metrics'])

            avg_metrics = {}
            for key in metric_keys:
                # Only batches that actually reported this metric contribute;
                # the old zero-fill (.get(key, 0.0)) biased means toward zero.
                values = [batch['metrics'][key] for batch in recent_batches if key in batch['metrics']]
                avg_metrics[key] = float(np.mean(values))

            return avg_metrics

    def get_latency_statistics(self, model_type: str = None, time_window: int = 60) -> Dict[str, float]:
        """Compute latency statistics over a recent time window.

        Args:
            model_type: Model label to filter on; None means all models.
            time_window: Look-back window in seconds.

        Returns:
            Dict with 'count', 'min', 'max', 'avg' and p50/p90/p95/p99
            percentiles in milliseconds (all zeros when no samples match).
            Values are plain Python numbers, safe to JSON-serialize.
        """
        cutoff_time = datetime.now() - timedelta(seconds=time_window)

        with self.lock:
            # Keep only samples inside the window, optionally for one model.
            filtered_latencies = [
                record['latency_ms']
                for record in self.latency_history
                if record['timestamp'] >= cutoff_time
                and (model_type is None or record['model_type'] == model_type)
            ]

        if not filtered_latencies:
            return {
                'count': 0,
                'min': 0.0,
                'max': 0.0,
                'avg': 0.0,
                'p50': 0.0,
                'p90': 0.0,
                'p95': 0.0,
                'p99': 0.0
            }

        latencies = np.asarray(filtered_latencies)

        # float(...) unwraps numpy scalars so the result serializes cleanly.
        return {
            'count': len(latencies),
            'min': float(np.min(latencies)),
            'max': float(np.max(latencies)),
            'avg': float(np.mean(latencies)),
            'p50': float(np.percentile(latencies, 50)),
            'p90': float(np.percentile(latencies, 90)),
            'p95': float(np.percentile(latencies, 95)),
            'p99': float(np.percentile(latencies, 99))
        }

    def calculate_throughput(self, time_window: int = 60) -> float:
        """Compute requests handled per second over a recent time window.

        Args:
            time_window: Look-back window in seconds.

        Returns:
            Requests per second; 0.0 for a non-positive window.
        """
        cutoff_time = datetime.now() - timedelta(seconds=time_window)

        with self.lock:
            total_requests = sum(
                1 for record in self.latency_history
                if record['timestamp'] >= cutoff_time
            )

        return total_requests / time_window if time_window > 0 else 0.0

    def generate_performance_report(self, time_window: int = 3600) -> Dict[str, Any]:
        """Build a combined throughput / latency / quality report.

        Args:
            time_window: Look-back window in seconds.

        Returns:
            Dict with 'timestamp', 'time_window', 'throughput', per-model
            'latency' statistics and per-model average 'model_metrics'.
        """
        report = {
            'timestamp': datetime.now(),
            'time_window': time_window,
            'throughput': self.calculate_throughput(time_window),
            'latency': {},
            'model_metrics': {}
        }

        # Snapshot the model labels under the lock; the original iterated the
        # histories unlocked, racing with concurrent writers. The aggregation
        # helpers are called *after* releasing the lock because self.lock is
        # not reentrant and each helper acquires it again.
        with self.lock:
            latency_model_types = {record['model_type'] for record in self.latency_history}
            metric_model_types = list(self.metrics_history)

        for model_type in latency_model_types:
            report['latency'][model_type] = self.get_latency_statistics(model_type, time_window)

        for model_type in metric_model_types:
            report['model_metrics'][model_type] = self.get_average_metrics(model_type)

        logger.info(f"生成性能报告，时间窗口: {time_window}秒")

        return report

    def reset(self) -> None:
        """Discard all recorded history and the current batch snapshot."""
        with self.lock:
            self.metrics_history = {}
            self.latency_history = []
            self.current_batch_metrics = {}

        logger.info("性能追踪器已重置")


# Global performance tracker singleton, built at import time from the
# monitor.model.performance_tracking section of the application config.
# NOTE(review): this import sits at the bottom of the file — presumably to
# avoid a circular import with src.utils.config_loader; confirm before
# moving it to the top-of-file import block.
from src.utils.config_loader import get_config

config = get_config()
# Each .get(..., {}) falls back to an empty dict, so the tracker still
# constructs (with its built-in defaults) when config sections are missing.
performance_tracker = PerformanceTracker(config.get('monitor', {}).get('model', {}).get('performance_tracking', {}))