"""
分类性能评估器

评估AI分类器的性能和准确率。
"""

import asyncio
import time
from typing import Dict, List, Any, Optional, Tuple
from datetime import datetime
import logging
import json
from pathlib import Path

from src.core.di import Injectable, Inject
from src.modules.scanners.base import FileInfo, FileCategory
from .base import AIClassifier, ClassificationResult


@Injectable(scope="singleton")
class ClassificationPerformanceEvaluator:
    """
    Classification performance evaluator.

    Runs an AI classifier over a set of test files and derives aggregate
    metrics: success rate, average confidence, per-file latency and
    throughput, and — when ground-truth labels are supplied — accuracy,
    per-class precision/recall/F1 and a confusion matrix.  Reports can
    optionally be persisted as JSON files under ``results_dir``.
    """
    
    def __init__(self,
                 config: Dict[str, Any] = Inject("config"),
                 logger: logging.Logger = Inject("logger")):
        self.config = config
        self.logger = logger
        
        # Evaluator-specific configuration subsection.
        self.eval_config = config.get("performance_evaluator", {})
        self.save_results = self.eval_config.get("save_results", True)
        self.results_dir = Path(self.eval_config.get("results_dir", "output/evaluation"))
        
        # Ensure the output directory exists before any report is written.
        if self.save_results:
            self.results_dir.mkdir(parents=True, exist_ok=True)
        
        # Template of the metrics this evaluator can produce.
        self.metrics = {
            "accuracy": 0.0,
            "precision": {},
            "recall": {},
            "f1_score": {},
            "confusion_matrix": {},
            "average_confidence": 0.0,
            "average_processing_time": 0.0,
            "success_rate": 0.0
        }
    
    async def evaluate_classifier(self, 
                                classifier: AIClassifier, 
                                test_files: List[FileInfo],
                                ground_truth: Dict[str, FileCategory] = None,
                                **kwargs) -> Dict[str, Any]:
        """
        Evaluate a classifier's performance on a set of test files.
        
        Args:
            classifier: The classifier under evaluation.
            test_files: Files to classify.
            ground_truth: Optional true labels (file path -> category);
                when provided, accuracy metrics are computed as well.
            **kwargs: Extra arguments forwarded to ``classifier.classify``.
        
        Returns:
            A dict containing the full evaluation report.
        """
        self.logger.info(f"开始评估分类器: {classifier.name}")
        
        start_time = time.time()
        
        # Classify every test file, collecting exactly one result and one
        # timing sample per file so results stay index-aligned with
        # ``test_files`` (``_calculate_accuracy_metrics`` relies on this).
        classification_results = []
        processing_times = []
        
        for file_info in test_files:
            file_start_time = time.time()
            try:
                result = await classifier.classify(file_info, **kwargs)
                classification_results.append(result)
            except Exception as e:
                self.logger.error(f"分类文件失败 {file_info.name}: {e}")
                # Record a failure placeholder so downstream statistics
                # still see one result per input file.
                failed_result = ClassificationResult(
                    success=False,
                    error=str(e),
                    classifier_type=classifier.name,
                    timestamp=datetime.now()
                )
                classification_results.append(failed_result)
            # BUGFIX: record the real elapsed time for failures as well;
            # the previous code appended 0.0, skewing the average latency
            # downward whenever any file failed.
            processing_times.append(time.time() - file_start_time)
        
        # Basic statistics.
        total_files = len(test_files)
        successful_classifications = len([r for r in classification_results if r.success])
        success_rate = successful_classifications / total_files if total_files > 0 else 0.0
        
        # Average per-file latency (includes failed attempts).
        avg_processing_time = sum(processing_times) / len(processing_times) if processing_times else 0.0
        
        # Average confidence over successful classifications only.
        confidences = [r.confidence for r in classification_results if r.success]
        avg_confidence = sum(confidences) / len(confidences) if confidences else 0.0
        
        # Accuracy metrics require ground-truth labels.
        accuracy_metrics = {}
        if ground_truth:
            accuracy_metrics = self._calculate_accuracy_metrics(
                classification_results, test_files, ground_truth
            )
        
        # Distribution of predicted categories.
        category_distribution = self._analyze_category_distribution(classification_results)
        
        # Failure-mode breakdown.
        error_analysis = self._analyze_errors(classification_results)
        
        # BUGFIX: measure elapsed time once so "evaluation_time" and
        # "throughput" agree (they previously used two separate
        # time.time() calls), and guard the throughput division against
        # a zero elapsed time on an empty/instant run.
        elapsed = time.time() - start_time
        
        evaluation_result = {
            "classifier_info": {
                "name": classifier.name,
                "version": classifier.version,
                "supported_file_types": classifier.supported_file_types
            },
            "test_summary": {
                "total_files": total_files,
                "successful_classifications": successful_classifications,
                "success_rate": success_rate,
                "evaluation_time": elapsed
            },
            "performance_metrics": {
                "average_confidence": avg_confidence,
                "average_processing_time": avg_processing_time,
                "throughput": total_files / elapsed if elapsed > 0 else 0.0
            },
            "accuracy_metrics": accuracy_metrics,
            "category_distribution": category_distribution,
            "error_analysis": error_analysis,
            "detailed_results": [r.to_dict() for r in classification_results],
            "evaluation_timestamp": datetime.now().isoformat()
        }
        
        # Persist the report if configured to do so.
        if self.save_results:
            await self._save_evaluation_results(classifier.name, evaluation_result)
        
        self.logger.info(
            f"分类器评估完成: {classifier.name} "
            f"(成功率: {success_rate:.2%}, 平均置信度: {avg_confidence:.2f})"
        )
        
        return evaluation_result
    
    def _calculate_accuracy_metrics(self, 
                                  results: List[ClassificationResult],
                                  test_files: List[FileInfo],
                                  ground_truth: Dict[str, FileCategory]) -> Dict[str, Any]:
        """
        Compute accuracy, per-class precision/recall/F1 and a confusion
        matrix.
        
        ``results`` and ``test_files`` are index-aligned; only successful
        results whose file path appears in ``ground_truth`` contribute.
        """
        
        # Build parallel lists of true and predicted labels.
        y_true = []
        y_pred = []
        
        for i, result in enumerate(results):
            if not result.success:
                continue
                
            file_path = test_files[i].path
            # NOTE(review): assumes ground_truth keys share the type of
            # FileInfo.path (e.g. both str) — confirm against callers.
            if file_path in ground_truth:
                y_true.append(ground_truth[file_path].value)
                y_pred.append(result.category.value)
        
        if not y_true:
            return {"error": "没有有效的真实标签进行准确率计算"}
        
        # Overall accuracy.
        correct_predictions = sum(1 for true, pred in zip(y_true, y_pred) if true == pred)
        accuracy = correct_predictions / len(y_true)
        
        # Every category seen either as a label or as a prediction.
        all_categories = list(set(y_true + y_pred))
        
        # Per-class precision, recall, F1 and confusion-matrix rows.
        precision = {}
        recall = {}
        f1_score = {}
        confusion_matrix = {}
        
        for category in all_categories:
            # True positives / false positives / false negatives.
            tp = sum(1 for true, pred in zip(y_true, y_pred) if true == category and pred == category)
            fp = sum(1 for true, pred in zip(y_true, y_pred) if true != category and pred == category)
            fn = sum(1 for true, pred in zip(y_true, y_pred) if true == category and pred != category)
            
            # Precision: of everything predicted as this class, how much was right.
            precision[category] = tp / (tp + fp) if (tp + fp) > 0 else 0.0
            
            # Recall: of everything truly this class, how much was found.
            recall[category] = tp / (tp + fn) if (tp + fn) > 0 else 0.0
            
            # F1: harmonic mean of precision and recall.
            p = precision[category]
            r = recall[category]
            f1_score[category] = 2 * (p * r) / (p + r) if (p + r) > 0 else 0.0
            
            # Confusion-matrix row: true == category, grouped by prediction.
            confusion_matrix[category] = {}
            for pred_category in all_categories:
                confusion_matrix[category][pred_category] = sum(
                    1 for true, pred in zip(y_true, y_pred) 
                    if true == category and pred == pred_category
                )
        
        # Macro averages: unweighted mean over classes.
        macro_precision = sum(precision.values()) / len(precision) if precision else 0.0
        macro_recall = sum(recall.values()) / len(recall) if recall else 0.0
        macro_f1 = sum(f1_score.values()) / len(f1_score) if f1_score else 0.0
        
        return {
            "accuracy": accuracy,
            "precision": precision,
            "recall": recall,
            "f1_score": f1_score,
            "macro_precision": macro_precision,
            "macro_recall": macro_recall,
            "macro_f1": macro_f1,
            "confusion_matrix": confusion_matrix,
            "total_samples": len(y_true)
        }
    
    def _analyze_category_distribution(self, results: List[ClassificationResult]) -> Dict[str, Any]:
        """Summarize how predictions are distributed over categories,
        including per-category counts, percentages and mean confidence."""
        
        successful_results = [r for r in results if r.success]
        
        if not successful_results:
            return {"error": "没有成功的分类结果"}
        
        # Count predictions and collect confidences per category.
        category_counts = {}
        confidence_by_category = {}
        
        for result in successful_results:
            category = result.category.value
            category_counts[category] = category_counts.get(category, 0) + 1
            
            if category not in confidence_by_category:
                confidence_by_category[category] = []
            confidence_by_category[category].append(result.confidence)
        
        # Mean confidence per category.
        avg_confidence_by_category = {}
        for category, confidences in confidence_by_category.items():
            avg_confidence_by_category[category] = sum(confidences) / len(confidences)
        
        # Share of each category as a percentage of successful results.
        total_count = len(successful_results)
        category_percentages = {
            category: count / total_count * 100 
            for category, count in category_counts.items()
        }
        
        return {
            "category_counts": category_counts,
            "category_percentages": category_percentages,
            "average_confidence_by_category": avg_confidence_by_category,
            "total_successful_classifications": total_count
        }
    
    def _analyze_errors(self, results: List[ClassificationResult]) -> Dict[str, Any]:
        """Group failed results into coarse error categories by matching
        substrings of the raw error message."""
        
        failed_results = [r for r in results if not r.success]
        
        if not failed_results:
            return {"total_errors": 0, "error_types": {}}
        
        # Bucket each failure by a simplified error type.
        error_types = {}
        for result in failed_results:
            # BUGFIX: tolerate a missing message — result.error may be None
            # on some failure paths, and `"x" in None` raises TypeError.
            error_msg = result.error or ""
            if "不支持的文件类型" in error_msg:
                error_type = "unsupported_file_type"
            elif "服务不可用" in error_msg:
                error_type = "service_unavailable"
            elif "API" in error_msg:
                error_type = "api_error"
            elif "超时" in error_msg or "timeout" in error_msg.lower():
                error_type = "timeout"
            else:
                error_type = "other"
            
            error_types[error_type] = error_types.get(error_type, 0) + 1
        
        return {
            "total_errors": len(failed_results),
            "error_rate": len(failed_results) / len(results),
            "error_types": error_types,
            # Only the first 10 error details are kept to bound report size.
            "error_details": [{"error": r.error, "timestamp": r.timestamp.isoformat()} for r in failed_results[:10]]
        }
    
    async def _save_evaluation_results(self, classifier_name: str, results: Dict[str, Any]) -> None:
        """Write one evaluation report to a timestamped JSON file under
        ``results_dir``; failures are logged, never raised."""
        try:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"evaluation_{classifier_name}_{timestamp}.json"
            
            output_path = self.results_dir / filename
            
            # default=str stringifies non-JSON values (datetimes, enums).
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(results, f, indent=2, ensure_ascii=False, default=str)
            
            self.logger.info(f"评估结果已保存: {output_path}")
            
        except Exception as e:
            # Best-effort persistence: never let a save failure abort evaluation.
            self.logger.error(f"保存评估结果失败: {e}")
    
    async def compare_classifiers(self, 
                                classifiers: List[AIClassifier],
                                test_files: List[FileInfo],
                                **kwargs) -> Dict[str, Any]:
        """
        Evaluate several classifiers on the same test set and build a
        comparison report.
        
        Args:
            classifiers: Classifiers to compare.
            test_files: Files each classifier is evaluated on.
            **kwargs: Forwarded to :meth:`evaluate_classifier`.
        
        Returns:
            The comparison report produced by ``_generate_comparison_report``.
        """
        
        self.logger.info(f"开始比较 {len(classifiers)} 个分类器")
        
        comparison_results = {}
        
        # Evaluate each classifier independently; one failure does not
        # abort the whole comparison.
        for classifier in classifiers:
            try:
                result = await self.evaluate_classifier(classifier, test_files, **kwargs)
                comparison_results[classifier.name] = result
            except Exception as e:
                self.logger.error(f"评估分类器失败 {classifier.name}: {e}")
                comparison_results[classifier.name] = {"error": str(e)}
        
        # Build the cross-classifier comparison.
        comparison_report = self._generate_comparison_report(comparison_results)
        
        # Persist the comparison if configured to do so.
        if self.save_results:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"classifier_comparison_{timestamp}.json"
            output_path = self.results_dir / filename
            
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(comparison_report, f, indent=2, ensure_ascii=False, default=str)
            
            self.logger.info(f"比较结果已保存: {output_path}")
        
        return comparison_report
    
    def _generate_comparison_report(self, results: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
        """Condense per-classifier evaluation reports into side-by-side key
        metrics and pick the best classifier along each dimension."""
        
        # Reports that carry an "error" key are excluded from comparison.
        valid_results = {name: result for name, result in results.items() if "error" not in result}
        
        if not valid_results:
            return {"error": "没有有效的评估结果进行比较"}
        
        # Extract the headline metrics for each classifier.
        comparison_metrics = {}
        
        for classifier_name, result in valid_results.items():
            performance = result.get("performance_metrics", {})
            test_summary = result.get("test_summary", {})
            accuracy = result.get("accuracy_metrics", {})
            
            comparison_metrics[classifier_name] = {
                "success_rate": test_summary.get("success_rate", 0.0),
                "average_confidence": performance.get("average_confidence", 0.0),
                "average_processing_time": performance.get("average_processing_time", 0.0),
                "throughput": performance.get("throughput", 0.0),
                "accuracy": accuracy.get("accuracy", 0.0),
                "macro_f1": accuracy.get("macro_f1", 0.0)
            }
        
        # Winner per dimension (lowest latency, highest everything else).
        best_classifiers = {
            "highest_accuracy": max(comparison_metrics.items(), key=lambda x: x[1]["accuracy"])[0] if comparison_metrics else None,
            "highest_confidence": max(comparison_metrics.items(), key=lambda x: x[1]["average_confidence"])[0] if comparison_metrics else None,
            "fastest_processing": min(comparison_metrics.items(), key=lambda x: x[1]["average_processing_time"])[0] if comparison_metrics else None,
            "highest_throughput": max(comparison_metrics.items(), key=lambda x: x[1]["throughput"])[0] if comparison_metrics else None
        }
        
        return {
            "comparison_summary": {
                "total_classifiers": len(results),
                "valid_results": len(valid_results),
                "comparison_timestamp": datetime.now().isoformat()
            },
            "comparison_metrics": comparison_metrics,
            "best_classifiers": best_classifiers,
            "detailed_results": results
        }
