"""
Scoring and comparison logic for benchmark reports.
基准测试报告评分与对比核心逻辑
"""

from typing import Dict, List, Optional
import statistics


class BenchmarkScorer:
    """
    Calculate multi-dimensional scores and comparisons for benchmark reports.

    Each report is scored against a baseline report on four dimensions
    (performance, efficiency, resource usage, stability); a report's total
    score is the weighted sum of its dimension scores. A score of 100 on a
    dimension means "on par with the baseline".
    """

    # Default weights for the scoring dimensions (sum to 1.0).
    DEFAULT_WEIGHTS = {
        'performance': 0.40,
        'efficiency': 0.30,
        'resource': 0.15,
        'stability': 0.15
    }

    # Weights used when power data is unavailable: the efficiency weight is
    # zeroed and redistributed to the other dimensions (sum to 1.0).
    NO_POWER_WEIGHTS = {
        'performance': 0.57,
        'efficiency': 0.0,
        'resource': 0.22,
        'stability': 0.21
    }

    def __init__(self, reports: Dict[str, Dict], baseline_id: Optional[str] = None):
        """
        Initialize the scorer.

        Args:
            reports: Dict mapping report_id to report data.
            baseline_id: ID of the baseline report; when None, the report
                with the median average QPS is auto-selected.
        """
        self.reports = reports
        # If no baseline was pinned by the caller, pick one automatically.
        self.baseline_id = baseline_id or self._auto_select_baseline()

    def _auto_select_baseline(self) -> Optional[str]:
        """
        Auto-select the baseline: the report whose average QPS is the median.

        Returns:
            The chosen report ID, or None when no reports are loaded.
        """
        if not self.reports:
            return None

        # Average QPS across all models of each report.
        qps_scores = {}
        for report_id, report in self.reports.items():
            models = report.get('models', {})
            if models:
                qps_scores[report_id] = statistics.mean(
                    m.get('qps', 0) for m in models.values()
                )

        # No report carries QPS data: fall back to the first report.
        if not qps_scores:
            return next(iter(self.reports))

        # Return the report sitting at the median QPS rank.
        sorted_ids = sorted(qps_scores.items(), key=lambda item: item[1])
        median_idx = len(sorted_ids) // 2
        return sorted_ids[median_idx][0]

    def calculate_scores(
        self,
        report_ids: Optional[List[str]] = None,
        models: Optional[List[str]] = None,
        weights: Optional[Dict[str, float]] = None
    ) -> Dict:
        """
        Calculate scores for the selected reports.

        Args:
            report_ids: List of report IDs to score (all loaded reports if
                None); unknown IDs are silently skipped.
            models: List of model names to include (all models if None).
            weights: Custom dimension weights; missing dimensions are
                treated as weight 0.0. When None, DEFAULT_WEIGHTS is used
                (or NO_POWER_WEIGHTS if no power data is available).

        Returns:
            Dict with the baseline ID, effective weights, power-data flag,
            and the list of per-report score dicts sorted by total score
            (descending).
        """
        # Default to every loaded report.
        if report_ids is None:
            report_ids = list(self.reports.keys())

        # Track whether the caller supplied custom weights: only implicit
        # defaults may be swapped for NO_POWER_WEIGHTS below. Comparing
        # dict *values* would wrongly swap caller-supplied weights that
        # happen to equal the defaults.
        using_default_weights = weights is None
        if using_default_weights:
            weights = self.DEFAULT_WEIGHTS.copy()

        # Per-metric averages of the baseline report, used for normalization.
        baseline_stats = self._compute_baseline_stats(models)

        # Without valid power data (perf_per_W), efficiency cannot be scored.
        has_power = baseline_stats.get('has_power', False)
        if not has_power and using_default_weights:
            weights = self.NO_POWER_WEIGHTS.copy()

        # Score each selected report against the baseline.
        scores = []
        for report_id in report_ids:
            if report_id not in self.reports:
                continue  # silently skip unknown report IDs

            score_data = self._calculate_report_score(
                report_id,
                baseline_stats,
                models,
                weights,
                has_power
            )
            scores.append(score_data)

        # Rank by total score, best first.
        scores.sort(key=lambda x: x['total_score'], reverse=True)

        return {
            'baseline': self.baseline_id,
            'weights': weights,
            'has_power': has_power,
            'scores': scores
        }

    def _compute_baseline_stats(self, models: Optional[List[str]] = None) -> Dict:
        """
        Compute per-metric averages of the baseline report for normalization.

        Returns an empty dict when the baseline has no (selected) models.
        The 'has_power' key reports whether any model carried a positive
        perf_per_W sample.
        """
        baseline = self.reports.get(self.baseline_id, {})
        baseline_models = baseline.get('models', {})

        # Restrict to the selected models, if any were given.
        if models:
            baseline_models = {
                k: v for k, v in baseline_models.items()
                if k in models
            }

        if not baseline_models:
            return {}

        # Per-metric sample lists, aggregated across models.
        stats = {
            'qps': [],
            'lat_avg_ms': [],
            'p50_ms': [],
            'p99_ms': [],
            'rss_mb': [],
            'vram_mb': [],
            'perf_per_W': [],
            'has_power': False
        }

        # Collect each model's metrics.
        for model_data in baseline_models.values():
            stats['qps'].append(model_data.get('qps', 0))
            stats['lat_avg_ms'].append(model_data.get('lat_avg_ms', 0))
            stats['p50_ms'].append(model_data.get('p50_ms', 0))
            stats['p99_ms'].append(model_data.get('p99_ms', 0))
            stats['rss_mb'].append(model_data.get('rss_mb', 0))

            vram = model_data.get('vram_mb')
            if vram is not None:
                stats['vram_mb'].append(vram)

            # Only positive power samples count as real power data.
            perf_per_w = model_data.get('perf_per_W')
            if perf_per_w is not None and perf_per_w > 0:
                stats['perf_per_W'].append(perf_per_w)
                stats['has_power'] = True

        # Reduce each sample list to its mean (0 when no samples).
        result = {}
        for key, values in stats.items():
            if key == 'has_power':
                result[key] = values
            elif values:
                result[key] = statistics.mean(values)
            else:
                result[key] = 0

        return result

    def _calculate_report_score(
        self,
        report_id: str,
        baseline_stats: Dict,
        models: Optional[List[str]],
        weights: Dict[str, float],
        has_power: bool
    ) -> Dict:
        """
        Calculate all dimension scores and the weighted total for one report.
        """
        report = self.reports[report_id]
        report_models = report.get('models', {})

        # Restrict to the selected models, if any were given.
        if models:
            report_models = {
                k: v for k, v in report_models.items()
                if k in models
            }

        # Per-metric averages for this report.
        device_stats = self._aggregate_model_stats(report_models, has_power)

        # Score each dimension; efficiency is 0 without power data.
        perf_score = self._calc_performance_score(device_stats, baseline_stats)
        efficiency_score = self._calc_efficiency_score(device_stats, baseline_stats) if has_power else 0
        resource_score = self._calc_resource_score(device_stats, baseline_stats)
        stability_score = self._calc_stability_score(device_stats, baseline_stats)

        # Weighted total; missing weight keys count as 0 so partial custom
        # weight dicts do not raise KeyError.
        total_score = (
            perf_score * weights.get('performance', 0.0) +
            efficiency_score * weights.get('efficiency', 0.0) +
            resource_score * weights.get('resource', 0.0) +
            stability_score * weights.get('stability', 0.0)
        )

        return {
            'id': report_id,
            'total_score': round(total_score, 2),
            'performance_score': round(perf_score, 2),
            'efficiency_score': round(efficiency_score, 2) if has_power else None,
            'resource_score': round(resource_score, 2),
            'stability_score': round(stability_score, 2),
            'models': report_models,
            'stats': device_stats
        }

    def _aggregate_model_stats(self, models: Dict, has_power: bool) -> Dict:
        """
        Average each metric across the given models.

        Returns an empty dict when no models are given; metrics with no
        samples are reported as 0.
        """
        if not models:
            return {}

        stats = {
            'qps': [],
            'lat_avg_ms': [],
            'p50_ms': [],
            'p99_ms': [],
            'rss_mb': [],
            'vram_mb': [],
            'perf_per_W': []
        }

        # Collect each model's metrics.
        for model_data in models.values():
            stats['qps'].append(model_data.get('qps', 0))
            stats['lat_avg_ms'].append(model_data.get('lat_avg_ms', 0))
            stats['p50_ms'].append(model_data.get('p50_ms', 0))
            stats['p99_ms'].append(model_data.get('p99_ms', 0))
            stats['rss_mb'].append(model_data.get('rss_mb', 0))

            vram = model_data.get('vram_mb')
            if vram is not None:
                stats['vram_mb'].append(vram)

            if has_power:
                # Mirror the baseline's filtering: only positive samples,
                # so a stray 0 W reading cannot skew the efficiency mean.
                perf_per_w = model_data.get('perf_per_W')
                if perf_per_w is not None and perf_per_w > 0:
                    stats['perf_per_W'].append(perf_per_w)

        # Reduce each sample list to its mean (0 when no samples).
        result = {}
        for key, values in stats.items():
            if values:
                result[key] = statistics.mean(values)
            else:
                result[key] = 0

        return result

    def _calc_performance_score(self, device: Dict, baseline: Dict) -> float:
        """
        Performance score: normalized QPS (70%) + normalized latency (30%).

        100 means on par with the baseline; returns 100.0 when the baseline
        lacks QPS or latency data.
        """
        if not baseline.get('qps') or not baseline.get('lat_avg_ms'):
            return 100.0

        # Higher QPS is better: device / baseline.
        qps_score = (device.get('qps', 0) / baseline['qps']) * 100
        # Lower latency is better: baseline / device (guarded against 0).
        lat_score = (baseline['lat_avg_ms'] / max(device.get('lat_avg_ms', 1), 0.001)) * 100

        return qps_score * 0.7 + lat_score * 0.3

    def _calc_efficiency_score(self, device: Dict, baseline: Dict) -> float:
        """
        Efficiency score: normalized performance per watt.

        Returns 100.0 when either side lacks power data.
        """
        if not baseline.get('perf_per_W') or not device.get('perf_per_W'):
            return 100.0

        return (device['perf_per_W'] / baseline['perf_per_W']) * 100

    def _calc_resource_score(self, device: Dict, baseline: Dict) -> float:
        """
        Resource score: normalized RSS, averaged 50/50 with normalized VRAM
        when both sides report VRAM. Lower usage is better.

        Returns 100.0 when the baseline lacks RSS data.
        """
        if not baseline.get('rss_mb'):
            return 100.0

        # Lower RSS is better: baseline / device (guarded against 0).
        rss_score = (baseline['rss_mb'] / max(device.get('rss_mb', 1), 0.001)) * 100

        # Blend in VRAM if both sides have it.
        if baseline.get('vram_mb') and device.get('vram_mb'):
            vram_score = (baseline['vram_mb'] / device['vram_mb']) * 100
            return rss_score * 0.5 + vram_score * 0.5

        return rss_score

    def _calc_stability_score(self, device: Dict, baseline: Dict) -> float:
        """
        Stability score: normalized tail-latency gap (p99 - p50).

        A smaller gap is better. Returns 100.0 when either gap is
        non-positive (no meaningful tail data to compare).
        """
        device_gap = device.get('p99_ms', 0) - device.get('p50_ms', 0)
        baseline_gap = baseline.get('p99_ms', 0) - baseline.get('p50_ms', 0)

        if device_gap <= 0 or baseline_gap <= 0:
            return 100.0

        return (baseline_gap / device_gap) * 100