#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import json
import logging
from typing import List, Dict, Any
from statistics import mean

# Configure module-wide logging: timestamped INFO-level messages to stderr.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class MetricAggregator:
    """
    Aggregate per-sample text-generation metrics into experiment-level averages.

    Expected input shape (per ``compute_aggregate_metrics``): a list of
    experiment dicts, each with a ``'results'`` list of sample dicts; each
    valid sample carries a ``'text_metrics'`` dict of scalar scores plus an
    optional nested ``'bleu'`` dict (``bleu-1`` … ``bleu-4``).
    """

    def __init__(self):
        """Initialize the aggregator (stateless; kept for API compatibility)."""
        pass

    def compute_aggregate_metrics(self, results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Compute mean values of all recognized metrics across valid samples.

        Samples containing an ``'error'`` key or missing ``'text_metrics'``
        are counted in ``total_samples`` but excluded from the averages.
        Metrics with no valid observations average to 0.0.

        Args:
            results: List of experiment result dicts (see class docstring).

        Returns:
            Dict mapping each metric name to its rounded (4 d.p.) mean,
            plus a nested ``'bleu'`` dict and ``total_samples`` /
            ``valid_samples`` counters.
        """
        # Scalar (non-BLEU) metrics collected straight off text_metrics.
        scalar_names = [
            'cosine_similarity', 'bertscore', 'meteor',
            'rouge-1', 'rouge-2', 'rouge-l',
            'rouge-1_p', 'rouge-1_r',
            'rouge-2_p', 'rouge-2_r',
            'rouge-l_p', 'rouge-l_r',
        ]
        metrics_collector: Dict[str, List[float]] = {name: [] for name in scalar_names}

        # BLEU scores live in a nested dict, so they are collected separately.
        bleu_metrics: Dict[str, List[float]] = {f'bleu-{n}': [] for n in range(1, 5)}

        total_samples = 0
        valid_samples = 0

        # Gather metric values from every valid sample.
        for experiment in results:
            for sample in experiment.get('results', []):
                total_samples += 1
                # Skip errored samples and samples without metrics.
                if 'error' in sample or 'text_metrics' not in sample:
                    continue

                valid_samples += 1
                metrics = sample['text_metrics']

                # Scalar metrics: only accept numeric values.
                for name, bucket in metrics_collector.items():
                    value = metrics.get(name)
                    if isinstance(value, (int, float)):
                        bucket.append(value)

                # Nested BLEU metrics: only accept numeric values.
                bleu = metrics.get('bleu')
                if isinstance(bleu, dict):
                    for name, bucket in bleu_metrics.items():
                        value = bleu.get(name)
                        if isinstance(value, (int, float)):
                            bucket.append(value)

        # Average every metric; empty collections fall back to 0.0.
        aggregated_metrics: Dict[str, Any] = {
            name: round(mean(values), 4) if values else 0.0
            for name, values in metrics_collector.items()
        }
        aggregated_metrics['bleu'] = {
            name: round(mean(values), 4) if values else 0.0
            for name, values in bleu_metrics.items()
        }

        # Attach sample statistics.
        aggregated_metrics['total_samples'] = total_samples
        aggregated_metrics['valid_samples'] = valid_samples

        return aggregated_metrics

    def load_results_from_file(self, file_path: str) -> List[Dict[str, Any]]:
        """
        Load experiment results from a JSON file.

        Args:
            file_path: Path to the results file.

        Returns:
            The parsed results as a list; an empty list on any failure
            (missing file, malformed JSON, I/O error). Errors are logged,
            never raised.
        """
        try:
            if not os.path.exists(file_path):
                logger.error(f"结果文件不存在: {file_path}")
                return []

            with open(file_path, 'r', encoding='utf-8') as f:
                results = json.load(f)

            # Normalize to a list: unwrap a {'results': [...]} envelope,
            # otherwise wrap a single object in a one-element list.
            if not isinstance(results, list):
                if isinstance(results, dict) and 'results' in results:
                    results = results['results']
                else:
                    results = [results]

            return results
        except Exception as e:
            logger.error(f"加载结果文件失败: {str(e)}")
            return []

    def save_aggregated_results(self, aggregated_metrics: Dict[str, Any], output_file: str) -> bool:
        """
        Save aggregated metrics to a JSON file (UTF-8, pretty-printed).

        Args:
            aggregated_metrics: The aggregated metrics dict.
            output_file: Destination file path; parent dirs are created.

        Returns:
            True on success, False on failure (errors are logged, not raised).
        """
        try:
            # exist_ok=True avoids the check-then-create race and is a no-op
            # when the directory already exists.
            output_dir = os.path.dirname(output_file)
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)

            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(aggregated_metrics, f, ensure_ascii=False, indent=2)

            logger.info(f"汇总指标已成功保存到: {output_file}")
            return True
        except Exception as e:
            logger.error(f"保存汇总指标失败: {str(e)}")
            return False

def _log_scores(metrics: Dict[str, Any]) -> None:
    """Log the primary scalar metrics, then the nested BLEU breakdown."""
    for name in ['cosine_similarity', 'bertscore', 'meteor', 'rouge-1', 'rouge-2', 'rouge-l']:
        if name in metrics:
            logger.info(f"{name}: {metrics[name]:.4f}")
    if 'bleu' in metrics:
        logger.info("BLEU指标:")
        for bleu_type, score in metrics['bleu'].items():
            logger.info(f"  {bleu_type}: {score:.4f}")

def main():
    """Demo entry point: aggregate metrics from a results file, or from mock data when absent."""
    aggregator = MetricAggregator()

    # Assumed location of the experiment results file.
    result_file = "../results/experiment_results.json"

    if os.path.exists(result_file):
        # Real-data path: load, aggregate, report, persist.
        logger.info(f"从文件加载实验结果: {result_file}")
        results = aggregator.load_results_from_file(result_file)

        if not results:
            logger.warning("没有加载到有效的实验结果")
            return

        logger.info(f"成功加载 {len(results)} 组实验结果")
        aggregated_metrics = aggregator.compute_aggregate_metrics(results)

        logger.info("===== 实验结果汇总指标（平均值）=====")
        logger.info(f"总样本数: {aggregated_metrics['total_samples']}")
        logger.info(f"有效样本数: {aggregated_metrics['valid_samples']}")
        _log_scores(aggregated_metrics)

        aggregator.save_aggregated_results(aggregated_metrics, "../results/aggregated_metrics.json")
        return

    # Fallback path: demonstrate aggregation on hard-coded mock results.
    logger.warning(f"结果文件不存在: {result_file}")
    logger.info("使用模拟数据演示汇总指标计算...")

    mock_results = [
        {
            "experiment_id": "context_window_4",
            "results": [
                {
                    "text_metrics": {
                        "cosine_similarity": 0.5263,
                        "bertscore": 0.5645,
                        "bleu": {"bleu-1": 0.0494, "bleu-2": 0.0165, "bleu-3": 0.0091, "bleu-4": 0.0057},
                        "rouge-1": 0.1168,
                        "rouge-2": 0.0,
                        "rouge-l": 0.073,
                        "meteor": 0.0877
                    }
                },
                {
                    "text_metrics": {
                        "cosine_similarity": 0.4746,
                        "bertscore": 0.5731,
                        "bleu": {"bleu-1": 0.0625, "bleu-2": 0.0314, "bleu-3": 0.0156, "bleu-4": 0.0093},
                        "rouge-1": 0.1579,
                        "rouge-2": 0.0354,
                        "rouge-l": 0.0789,
                        "meteor": 0.1262
                    }
                }
            ]
        }
    ]

    aggregated_metrics = aggregator.compute_aggregate_metrics(mock_results)

    logger.info("模拟数据汇总指标:")
    _log_scores(aggregated_metrics)

if __name__ == "__main__":
    main()