"""
多模态评估器模块

该模块提供了多模态推理结果的评估功能，支持图像和文本样本的综合评估。
主要功能包括：
- 复用现有的评估器逻辑
- 支持多模态样本的分类评估
- 提供详细的评估指标和统计信息
- 生成评估报告和可视化结果

使用方法：
    from eval_src.multimodal_evaluator import MultimodalEvaluator
    evaluator = MultimodalEvaluator(config)
    results = evaluator.evaluate_batch(inference_results)
"""

import sys
sys.path.append(".")

from eval_src.Evaluator import GSM8KEvaluator, Evaluator
from typing import List, Dict, Any, Optional
import json
import logging
from collections import defaultdict
import os

# Module-level logger (standard one-logger-per-module pattern); handlers and
# levels are expected to be configured by the application entry point.
logger = logging.getLogger(__name__)


class MultimodalEvaluator:
    """
    Multimodal inference evaluator.

    Evaluates batches of multimodal inference results, splitting all metrics
    by modality (image-bearing vs. text-only samples). Answer correctness is
    delegated to an existing base evaluator (currently ``GSM8KEvaluator``).

    Attributes:
        config: Configuration object; must expose ``config.data`` and a
            dict-like ``config.output_config``.
        base_evaluator: Evaluator used for answer-equivalence checks.
        dataset_type: Dataset type string read from ``config.data``.
    """

    def __init__(self, config):
        """
        Initialize the multimodal evaluator.

        Args:
            config: Configuration object. ``config.data.dataset_type`` selects
                the base evaluator (defaults to 'math');
                ``config.output_config['evaluation_path']`` (optional) is where
                evaluation results are persisted.
        """
        self.config = config
        self.dataset_type = getattr(config.data, 'dataset_type', 'math')

        # Select the answer-equivalence evaluator by dataset type.
        if self.dataset_type in ('math', 'GSM8K'):
            self.base_evaluator = GSM8KEvaluator()
        else:
            # Extension point for other datasets; every other type currently
            # falls back to the GSM8K evaluator as well.
            self.base_evaluator = GSM8KEvaluator()

        logger.info(f"初始化多模态评估器，数据集类型: {self.dataset_type}")

    def evaluate_batch(
        self, results: List[Dict[str, Any]], save_results: bool = True
    ) -> Dict[str, Any]:
        """
        Evaluate a batch of multimodal inference results.

        Computes overall accuracy, per-modality accuracy (image vs. text-only),
        a per-sample detail list, and an error-type histogram. Samples carrying
        an 'error' key — and samples that raise during evaluation — count as
        incorrect but are still attributed to their modality, so the modality
        counts sum to the total.

        Args:
            results: Inference result dicts. Recognized keys: 'best_answer',
                'gold_answer', 'has_image', 'sample_id', 'question',
                'image_path', 'processing_time', and optionally 'error'.
            save_results: When True (default), persist the summary to the
                configured output path. Pass False for auxiliary evaluations
                (e.g. per-modality sub-runs) that must not overwrite the main
                report on disk.

        Returns:
            Evaluation summary dict (see _calculate_metrics) extended with
            'error_analysis' and 'detailed_results'.

        Example:
            >>> evaluator = MultimodalEvaluator(config)
            >>> evaluation = evaluator.evaluate_batch(results)
            >>> print(f"总体准确率: {evaluation['overall_accuracy']:.2%}")
        """
        logger.info(f"开始评估 {len(results)} 个推理结果")

        # Aggregate counters.
        total_samples = len(results)
        correct_count = 0
        image_correct = 0
        text_only_correct = 0
        image_samples = 0
        text_only_samples = 0
        error_samples = 0

        # Per-sample records and error-type histogram.
        detailed_results: List[Dict[str, Any]] = []
        error_analysis: Dict[str, int] = defaultdict(int)

        for i, result in enumerate(results):
            try:
                predicted_answer = result.get('best_answer', '')
                gold_answer = result.get('gold_answer', '')
                has_image = result.get('has_image', False)
                sample_id = result.get('sample_id', i)

                # Attribute every sample (including failed ones) to a modality
                # up front so modality counts always sum to total_samples.
                if has_image:
                    image_samples += 1
                else:
                    text_only_samples += 1

                if 'error' in result:
                    # Upstream inference failure: record it, count as incorrect.
                    error_samples += 1
                    error_msg = result['error']
                    error_type = error_msg.split(':')[0] if ':' in error_msg else 'Unknown'
                    error_analysis[error_type] += 1
                    detailed_results.append({
                        'sample_id': sample_id,
                        'has_image': has_image,
                        'predicted_answer': '',
                        'gold_answer': gold_answer,
                        'is_correct': False,
                        'error': error_msg,
                    })
                    continue

                # Delegate correctness to the base evaluator.
                is_correct = self._evaluate_single_answer(predicted_answer, gold_answer)
                if is_correct:
                    correct_count += 1
                    if has_image:
                        image_correct += 1
                    else:
                        text_only_correct += 1

                detailed_results.append({
                    'sample_id': sample_id,
                    'has_image': has_image,
                    'predicted_answer': predicted_answer,
                    'gold_answer': gold_answer,
                    'is_correct': is_correct,
                    'question': result.get('question', ''),
                    'image_path': result.get('image_path', ''),
                    'processing_time': result.get('processing_time', 0),
                })

            except Exception as e:
                # Defensive: a malformed result record must not abort the batch.
                logger.error(f"评估样本 {i} 时出错: {e}")
                error_samples += 1
                error_analysis['evaluation_error'] += 1
                detailed_results.append({
                    'sample_id': result.get('sample_id', i),
                    'has_image': result.get('has_image', False),
                    'predicted_answer': '',
                    'gold_answer': result.get('gold_answer', ''),
                    'is_correct': False,
                    'error': f"评估错误: {str(e)}",
                })

        evaluation_summary = self._calculate_metrics(
            total_samples, correct_count, image_correct, text_only_correct,
            image_samples, text_only_samples, error_samples
        )
        evaluation_summary['error_analysis'] = dict(error_analysis)
        evaluation_summary['detailed_results'] = detailed_results

        if save_results:
            self._save_evaluation_results(evaluation_summary)

        logger.info("评估完成")
        logger.info(f"总体准确率: {evaluation_summary['overall_accuracy']:.2%}")
        logger.info(f"图像样本准确率: {evaluation_summary['image_accuracy']:.2%}")
        logger.info(f"文本样本准确率: {evaluation_summary['text_only_accuracy']:.2%}")

        return evaluation_summary

    def _evaluate_single_answer(self, predicted_answer: str, gold_answer: str) -> bool:
        """
        Check one predicted answer against the gold answer.

        Args:
            predicted_answer: Predicted answer string.
            gold_answer: Reference answer string.

        Returns:
            True if the base evaluator deems the answers equivalent; False
            otherwise (including when the equivalence check itself fails).
        """
        try:
            return self.base_evaluator.check_answers_equiv(predicted_answer, gold_answer)
        except Exception as e:
            # Best-effort: an evaluator crash marks the answer wrong, not fatal.
            logger.warning(f"答案评估失败: {e}")
            return False

    def _calculate_metrics(
        self, total_samples: int, correct_count: int, image_correct: int,
        text_only_correct: int, image_samples: int, text_only_samples: int,
        error_samples: int
    ) -> Dict[str, Any]:
        """
        Compute the evaluation metrics dictionary from raw counters.

        Args:
            total_samples: Total number of samples.
            correct_count: Number of correct samples overall.
            image_correct: Correct samples among image-bearing ones.
            text_only_correct: Correct samples among text-only ones.
            image_samples: Total image-bearing samples.
            text_only_samples: Total text-only samples.
            error_samples: Samples that failed (inference or evaluation error).

        Returns:
            Dict of overall / per-modality accuracies, counts, the
            image-vs-text performance gap, and a timestamp. All ratios fall
            back to 0 when their denominator is 0.
        """
        overall_accuracy = correct_count / total_samples if total_samples > 0 else 0
        success_rate = (total_samples - error_samples) / total_samples if total_samples > 0 else 0

        image_accuracy = image_correct / image_samples if image_samples > 0 else 0
        text_only_accuracy = text_only_correct / text_only_samples if text_only_samples > 0 else 0

        # Positive gap means image samples outperform text-only samples.
        performance_gap = image_accuracy - text_only_accuracy

        return {
            'total_samples': total_samples,
            'correct_count': correct_count,
            'error_count': error_samples,
            'overall_accuracy': overall_accuracy,
            'success_rate': success_rate,

            'image_samples': image_samples,
            'image_correct': image_correct,
            'image_accuracy': image_accuracy,

            'text_only_samples': text_only_samples,
            'text_only_correct': text_only_correct,
            'text_only_accuracy': text_only_accuracy,

            'performance_gap': performance_gap,
            'image_advantage': performance_gap > 0,

            'evaluation_timestamp': self._get_timestamp()
        }

    def _save_evaluation_results(self, evaluation_summary: Dict[str, Any]):
        """
        Persist the evaluation results as JSON plus a plain-text summary.

        Silently returns when no 'evaluation_path' is configured; I/O failures
        are logged but never propagated.

        Args:
            evaluation_summary: Summary produced by evaluate_batch.
        """
        output_path = self.config.output_config.get('evaluation_path')
        if not output_path:
            return

        try:
            # Make sure the target directory exists.
            output_dir = os.path.dirname(output_path)
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            # Full evaluation result (keeps non-ASCII text readable).
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(evaluation_summary, f, ensure_ascii=False, indent=2)

            logger.info(f"评估结果已保存到: {output_path}")

            # Derive the summary path from the stem via splitext, so a path
            # without a '.json' suffix cannot collide with (and the text report
            # overwrite) the JSON file itself.
            base, _ext = os.path.splitext(output_path)
            summary_path = base + '_summary.txt'
            self._save_summary_report(evaluation_summary, summary_path)

        except Exception as e:
            logger.error(f"保存评估结果失败: {e}")

    def _save_summary_report(self, evaluation_summary: Dict[str, Any], output_path: str):
        """
        Write a human-readable summary report of the evaluation.

        Args:
            evaluation_summary: Summary produced by evaluate_batch.
            output_path: Destination path for the text report.
        """
        try:
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write("=== 多模态推理评估报告 ===\n\n")

                # Basic statistics.
                f.write("## 基本统计\n")
                f.write(f"总样本数: {evaluation_summary['total_samples']}\n")
                f.write(f"成功处理: {evaluation_summary['total_samples'] - evaluation_summary['error_count']}\n")
                f.write(f"处理失败: {evaluation_summary['error_count']}\n")
                f.write(f"成功率: {evaluation_summary['success_rate']:.2%}\n\n")

                # Accuracy statistics.
                f.write("## 准确率统计\n")
                f.write(f"总体准确率: {evaluation_summary['overall_accuracy']:.2%}\n")
                f.write(f"图像样本准确率: {evaluation_summary['image_accuracy']:.2%} ({evaluation_summary['image_correct']}/{evaluation_summary['image_samples']})\n")
                f.write(f"文本样本准确率: {evaluation_summary['text_only_accuracy']:.2%} ({evaluation_summary['text_only_correct']}/{evaluation_summary['text_only_samples']})\n\n")

                # Modality comparison.
                f.write("## 性能对比\n")
                f.write(f"图像vs文本性能差距: {evaluation_summary['performance_gap']:.2%}\n")
                if evaluation_summary['image_advantage']:
                    f.write("图像样本表现更好\n")
                else:
                    f.write("文本样本表现更好\n")
                f.write("\n")

                # Error breakdown (only when errors occurred).
                if evaluation_summary.get('error_analysis'):
                    f.write("## 错误分析\n")
                    for error_type, count in evaluation_summary['error_analysis'].items():
                        f.write(f"{error_type}: {count} 次\n")
                    f.write("\n")

                f.write(f"评估时间: {evaluation_summary['evaluation_timestamp']}\n")

            logger.info(f"评估摘要已保存到: {output_path}")

        except Exception as e:
            logger.error(f"保存评估摘要失败: {e}")

    def _get_timestamp(self) -> str:
        """
        Return the current local time as 'YYYY-MM-DD HH:MM:SS'.

        Returns:
            Formatted timestamp string.
        """
        # Local import keeps the module's top-level import block untouched.
        import datetime
        return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def compare_modalities(self, results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Compare performance between image-bearing and text-only samples.

        Runs a separate evaluation per modality subset. The sub-evaluations use
        save_results=False so they do not overwrite the main evaluation report
        on disk.

        Args:
            results: Inference result dicts (see evaluate_batch).

        Returns:
            Dict with 'image_evaluation' and 'text_evaluation' (each a summary
            dict, or None when that subset is empty) plus a
            'comparison_summary' with accuracies and sample counts.
        """
        image_results = [r for r in results if r.get('has_image', False)]
        text_results = [r for r in results if not r.get('has_image', False)]

        # Evaluate each modality independently, without persisting.
        image_eval = self.evaluate_batch(image_results, save_results=False) if image_results else None
        text_eval = self.evaluate_batch(text_results, save_results=False) if text_results else None

        comparison = {
            'image_evaluation': image_eval,
            'text_evaluation': text_eval,
            'comparison_summary': {}
        }

        # A head-to-head summary only makes sense when both subsets exist.
        if image_eval and text_eval:
            comparison['comparison_summary'] = {
                'image_accuracy': image_eval['overall_accuracy'],
                'text_accuracy': text_eval['overall_accuracy'],
                'accuracy_difference': image_eval['overall_accuracy'] - text_eval['overall_accuracy'],
                'image_sample_count': len(image_results),
                'text_sample_count': len(text_results)
            }

        return comparison

    def analyze_error_patterns(self, results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Analyze error patterns across the result set.

        Args:
            results: Inference result dicts (see evaluate_batch).

        Returns:
            Dict with errors grouped by modality and by error type, the top-5
            most common error types, and up to 10 example records.
        """
        error_patterns: Dict[str, Any] = {
            'by_modality': {'image': defaultdict(int), 'text': defaultdict(int)},
            'by_error_type': defaultdict(int),
            'common_errors': [],
            'error_examples': []
        }

        for result in results:
            if 'error' not in result:
                continue

            has_image = result.get('has_image', False)
            error_msg = result['error']
            # Error type is the text before the first ':' (e.g. "Timeout: ...").
            error_type = error_msg.split(':')[0] if ':' in error_msg else 'Unknown'

            modality = 'image' if has_image else 'text'
            error_patterns['by_modality'][modality][error_type] += 1
            error_patterns['by_error_type'][error_type] += 1

            # Keep at most 10 concrete examples for the report.
            if len(error_patterns['error_examples']) < 10:
                question = result.get('question', '')
                # Only truncate (and mark with an ellipsis) genuinely long questions.
                if len(question) > 100:
                    question = question[:100] + '...'
                error_patterns['error_examples'].append({
                    'sample_id': result.get('sample_id', 'unknown'),
                    'has_image': has_image,
                    'error': error_msg,
                    'question': question
                })

        # Most frequent error types first.
        sorted_errors = sorted(error_patterns['by_error_type'].items(), key=lambda x: x[1], reverse=True)
        error_patterns['common_errors'] = sorted_errors[:5]

        return error_patterns