# ===== MMLU-Pro 最终评估结果 =====
# 最佳参数: {'temperature': 0.5, 'top_k': 20, 'repetition_penalty': 2.0}
# 快速评估准确率: 17.14%
# 完整评估准确率: 10.36%



import re
import random
import json
from tqdm import tqdm
import datasets
from datetime import datetime
import os
import numpy as np
from infere import generate_text
import matplotlib.pyplot as plt

def evaluate_mmlu_pro(
    num_few_shot: int = 3,
    num_test_samples: int = 20,
    temperature: float = 0.3,
    top_k: int = 20,
    repetition_penalty: float = 1.5,
    max_length: int = 600,
    save_results: bool = True,
    verbose: bool = False
) -> tuple:
    """Evaluate the model on the MMLU-Pro benchmark.

    Builds a few-shot chain-of-thought context per category from the
    validation split, prompts the model via ``generate_text`` for each test
    question, extracts the predicted option letter, and aggregates
    per-category accuracy plus an error-type breakdown.

    Args:
        num_few_shot: number of few-shot examples per category.
        num_test_samples: cap on test questions per category (None = no cap).
        temperature: sampling temperature passed to the generator.
        top_k: top-k sampling cutoff passed to the generator.
        repetition_penalty: repetition penalty passed to the generator.
        max_length: maximum generation length.
        save_results: write detailed JSON results and plots under eval_results/.
        verbose: print progress details.

    Returns:
        A 3-tuple ``(mean_accuracy, category_accuracies, error_analysis)``.
        On dataset-load failure returns ``(0.0, {}, {})`` so that callers
        unpacking three values still work.
    """
    # Load the MMLU-Pro dataset.
    try:
        dataset = datasets.load_dataset('mmlu-pro')
        if verbose:
            print("数据集加载成功")
    except Exception as e:
        print(f"无法加载MMLU-Pro数据集: {str(e)}")
        # Bug fix: the original returned a bare dict here, which broke every
        # caller that unpacks `accuracy, _, _ = evaluate_mmlu_pro(...)`.
        # The failure path must mirror the success path's tuple shape.
        return 0.0, {}, {}

    # MMLU-Pro category list.
    categories = ['computer science', 'math', 'chemistry', 'engineering', 'law', 'biology',
                  'health', 'physics', 'business', 'philosophy', 'economics', 'other',
                  'psychology', 'history']

    # System prompt instructing step-by-step reasoning and an answer format
    # that get_prediction() can parse ('答案：<letter>').
    system_prompt = (
        "你是一位严谨的学术专家，请按照以下步骤分析问题：\n"
        "1. 确定问题所属的学科领域\n"
        "2. 分析关键概念和逻辑关系\n"
        "3. 分步推理得出结论\n"
        "4. 最终答案格式：'答案：[选项字母]'\n\n"
    )

    # Build a few-shot context string per category from the validation split.
    category_contexts = {}
    if verbose:
        print("准备few-shot上下文示例...")

    for category in categories:
        val_ds = dataset['validation'].filter(lambda x: x['category'] == category)
        # Use the whole validation subset when it is smaller than requested.
        if len(val_ds) < num_few_shot:
            examples = val_ds
        else:
            examples = val_ds.select(random.sample(range(len(val_ds)), num_few_shot))

        context_str = ""
        for example in examples:
            context_str += f"Q: {example['question']}\n"
            context_str += form_options(example['options'])
            context_str += f"推理过程: {example['cot_content']}\n"
            context_str += f"答案: {example['answer']}\n\n"

        category_contexts[category] = context_str

    results = {}
    category_accuracies = {}
    # Counters keyed by the labels analyze_error() can return.
    error_analysis = {
        "reasoning_incomplete": 0,
        "knowledge_gap": 0,
        "format_error": 0,
        "random_guess": 0
    }

    # Evaluate each category independently so one failure doesn't abort the run.
    for category in tqdm(categories, desc="评估类别"):
        try:
            # Test subset for the current category, optionally capped in size.
            category_ds = dataset['test'].filter(lambda x: x['category'] == category)

            if num_test_samples is not None and num_test_samples < len(category_ds):
                category_ds = category_ds.select(range(num_test_samples))

            context_prompt = category_contexts[category]

            correct = 0
            total = 0
            category_results = []

            for example in tqdm(category_ds, desc=category, leave=False):
                # Full prompt = system instructions + few-shot examples + question.
                prompt = system_prompt + context_prompt
                prompt += f"Q: {example['question']}\n"
                prompt += form_options(example['options'])
                prompt += "\n请严格按照要求逐步思考推理，然后给出答案。"

                # Query the model.
                response = generate_text(
                    prompt=prompt,
                    max_length=max_length,
                    temperature=temperature,
                    top_k=top_k,
                    repetition_penalty=repetition_penalty,
                    eos_token=2
                )

                # Extract the predicted option letter from the raw response.
                prediction = get_prediction(response)
                actual = example["answer"]

                # Classify wrong answers by likely failure mode.
                is_correct = prediction == actual
                if not is_correct:
                    error_type = analyze_error(response, prediction, actual)
                    error_analysis[error_type] += 1

                correct += int(is_correct)
                total += 1

                if save_results:
                    category_results.append({
                        "question": example["question"],
                        "predicted": prediction,
                        "actual": actual,
                        "response": response,
                        "correct": is_correct
                    })

            # Per-category accuracy (0 when the category had no samples).
            accuracy = correct / total if total > 0 else 0
            category_accuracies[category] = accuracy

            if save_results:
                results[category] = {
                    "accuracy": accuracy,
                    "num_samples": total,
                    "details": category_results
                }

            if verbose:
                print(f"{category}: {accuracy:.2%} ({correct}/{total})")

        except Exception as e:
            print(f"评估{category}时出错: {str(e)}")

    # Unweighted mean over categories that were successfully evaluated.
    mean_accuracy = np.mean(list(category_accuracies.values())) if category_accuracies else 0

    # Persist detailed results and visualizations.
    if save_results:
        os.makedirs("eval_results", exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        result_file = f"eval_results/mmlu_pro_results_{timestamp}.json"

        with open(result_file, "w", encoding="utf-8") as f:
            json.dump({
                "overall_accuracy": mean_accuracy,
                "category_accuracies": category_accuracies,
                "error_analysis": error_analysis,
                "params": {
                    "temperature": temperature,
                    "top_k": top_k,
                    "repetition_penalty": repetition_penalty,
                    "num_few_shot": num_few_shot,
                    "max_length": max_length
                },
                "category_results": results
            }, f, ensure_ascii=False, indent=2)

        visualize_results(category_accuracies, error_analysis, timestamp)

        if verbose:
            print(f"结果已保存至 {result_file}")

    return mean_accuracy, category_accuracies, error_analysis

def form_options(options: list) -> str:
    """Render up to the first ten answer options as lettered lines (A., B., ...)."""
    lettered = (
        f"{chr(65 + idx)}. {text}\n" for idx, text in enumerate(options[:10])
    )
    return '选项：\n' + ''.join(lettered)

def get_prediction(response: str) -> str:
    """Extract the predicted option letter (A-J) from a model response.

    Tries increasingly lenient strategies:
      1. explicit answer phrases ('答案: X', '正确答案是X', ...),
      2. the last stand-alone capital letter A-J anywhere in the text,
      3. an 'X.'-style option reference in the final five lines,
      4. a uniformly random letter as a last resort (counted as a guess).

    Args:
        response: raw generated text.

    Returns:
        A single character in 'ABCDEFGHIJ'.
    """
    # Explicit answer-phrase patterns, most specific first.
    patterns = [
        r'答案\s*[:：]\s*([A-J])',            # 答案: A
        # Bug fix: the original used the character class [是|为], which matches
        # the literal '|' as well; (?:是|为) is the intended alternation.
        r'正确答案\s*(?:是|为)\s*([A-J])',    # 正确答案是B / 正确答案为B
        r'选项\s*([A-J])\s*(?:正确|符合)',    # 选项C正确
        r'最终答案\s*[:：]\s*([A-J])'         # 最终答案：D
    ]

    # Round 1: try each explicit pattern.
    for pattern in patterns:
        match = re.search(pattern, response)
        if match:
            return match.group(1)

    # Round 2: fall back to the last stand-alone capital option letter.
    option_letters = re.findall(r'\b([A-J])\b', response)
    if option_letters:
        return option_letters[-1]

    # Round 3: scan the last five lines for an 'X.'-style option reference.
    lines = response.split('\n')
    for line in reversed(lines[-5:]):
        if re.search(r'[A-J]\.', line):
            match = re.search(r'([A-J])', line)
            if match:
                return match.group(1)

    # Final fallback: random guess (analyze_error treats these separately).
    return random.choice('ABCDEFGHIJ')

def analyze_error(response: str, prediction: str, actual: str) -> str:
    """Classify a wrong answer into one of four failure buckets.

    Returns one of the keys of the caller's error counter:
    'format_error', 'random_guess', 'reasoning_incomplete', 'knowledge_gap'.
    """
    # Prediction outside the valid option alphabet -> formatting failure.
    if prediction not in 'ABCDEFGHIJ':
        return "format_error"

    # No reasoning markers at all -> the model likely guessed.
    reasoning_markers = ("推理", "分析", "步骤", "因为", "所以", "因此")
    if not any(marker in response for marker in reasoning_markers):
        return "random_guess"

    # Reasoning present but trails off -> incomplete chain of thought.
    trailing_markers = ("...", "等等", "等", "未完", "待续", "请继续")
    if any(marker in response for marker in trailing_markers):
        return "reasoning_incomplete"

    # Relevance heuristic: count long word-character runs in the opening
    # 100 characters; too few suggests the content missed the topic.
    long_tokens = re.findall(r'\w{5,}', response[:100])
    if len(long_tokens) <= 3:
        return "knowledge_gap"

    # Reasoned, complete, and on-topic yet still wrong -> treat as format issue.
    return "format_error"

def visualize_results(category_accuracies, error_analysis, timestamp):
    """Plot category accuracies (bar chart) and error-type mix (pie chart).

    Saves the combined figure to eval_results/results_visualization_<timestamp>.png.
    """
    plt.figure(figsize=(14, 8))

    # Left panel: horizontal bars sorted so the weakest category sits at the bottom.
    plt.subplot(1, 2, 1)
    by_accuracy = sorted(category_accuracies.items(), key=lambda item: item[1])
    names = [name for name, _ in by_accuracy]
    scores = [score for _, score in by_accuracy]
    plt.barh(names, scores, color='skyblue')
    plt.title('MMLU-Pro 类别准确率')
    plt.xlabel('准确率')
    plt.xlim(0, 1)
    plt.grid(axis='x', linestyle='--', alpha=0.6)

    # Right panel: pie chart over the non-zero error buckets.
    plt.subplot(1, 2, 2)
    labeled_counts = [
        ('推理不完整', error_analysis['reasoning_incomplete']),
        ('知识缺失', error_analysis['knowledge_gap']),
        ('格式错误', error_analysis['format_error']),
        ('随机猜测', error_analysis['random_guess']),
    ]
    kept = [(label, count) for label, count in labeled_counts if count > 0]

    if kept:
        labels = [label for label, _ in kept]
        counts = [count for _, count in kept]
        plt.pie(counts, labels=labels, autopct='%1.1f%%',
                colors=['#ff9999','#66b3ff','#99ff99','#ffcc99'])
        plt.title('错误类型分布')
    else:
        # Nothing to chart when no errors were recorded.
        plt.text(0.5, 0.5, "无错误记录", ha='center', va='center')

    plt.tight_layout()
    plt.savefig(f"eval_results/results_visualization_{timestamp}.png")
    plt.close()

def hyperparameter_search(verbose=True):
    """Grid-search generation hyperparameters using a fast, small evaluation.

    Each combination is scored by ``evaluate_mmlu_pro`` with 2 few-shot
    examples and 5 test samples per category. All results are saved to
    eval_results/hyperparam_search_results.json and visualized.

    Args:
        verbose: print each combination's accuracy as it is evaluated.

    Returns:
        (best_params, best_accuracy) for the highest-scoring combination.
    """
    # Parameter grid (narrowed around previously promising values).
    temperature_options = [0.4, 0.6, 0.7, 0.8, 0.9]
    top_k_options = [10]
    repetition_penalty_options = [1.4, 1.3, 1.6]
    max_length_options = [500]

    results = []
    best_accuracy = 0
    best_params = {}

    total_combinations = (
        len(temperature_options) *
        len(top_k_options) *
        len(repetition_penalty_options) *
        len(max_length_options)
    )

    if verbose:
        print(f"开始MMLU-Pro超参数搜索 ({total_combinations}种组合)")

    # Exhaustively evaluate every parameter combination.
    with tqdm(total=total_combinations, desc="参数搜索进度") as pbar:
        for temp in temperature_options:
            for top_k in top_k_options:
                for penalty in repetition_penalty_options:
                    for max_len in max_length_options:
                        # Quick evaluation on a tiny per-category sample.
                        accuracy, _, _ = evaluate_mmlu_pro(
                            num_few_shot=2,
                            num_test_samples=5,  # 5 samples per category
                            temperature=temp,
                            top_k=top_k,
                            repetition_penalty=penalty,
                            max_length=max_len,
                            save_results=False,
                            verbose=False
                        )

                        # Bug fix: the original log line hard-coded the
                        # garbage text 'max_len=33,418' instead of
                        # interpolating the actual max_len value.
                        param_str = (
                            f"temp={temp}, top_k={top_k}, "
                            f"penalty={penalty}, max_len={max_len}"
                        )
                        result_entry = {
                            "params": {
                                "temperature": temp,
                                "top_k": top_k,
                                "repetition_penalty": penalty,
                                "max_length": max_len
                            },
                            "accuracy": accuracy
                        }

                        results.append(result_entry)

                        # Track the best combination seen so far.
                        if accuracy > best_accuracy:
                            best_accuracy = accuracy
                            best_params = result_entry["params"]

                        if verbose:
                            print(f"{param_str} => {accuracy:.2%}")

                        pbar.update(1)

    # Persist all search results for later analysis.
    os.makedirs("eval_results", exist_ok=True)
    search_file = "eval_results/hyperparam_search_results.json"
    with open(search_file, "w") as f:
        json.dump(results, f, indent=2)

    print("\n===== 超参数搜索完成 =====")
    print(f"测试了 {len(results)} 种参数组合")
    print(f"最佳参数组合: {best_params}")
    print(f"最高准确率: {best_accuracy:.2%}")

    # Visualize accuracy versus each parameter.
    visualize_search_results(results)

    return best_params, best_accuracy

def visualize_search_results(results):
    """Plot mean accuracy versus each searched hyperparameter (2x2 grid).

    Saves the figure to eval_results/hyperparam_search_visualization.png.
    """
    plt.figure(figsize=(16, 10))

    panels = [
        ('temperature', '温度'),
        ('top_k', 'Top-K'),
        ('repetition_penalty', '重复惩罚'),
        ('max_length', '生成长度'),
    ]

    for panel_idx, (key, label) in enumerate(panels):
        plt.subplot(2, 2, panel_idx + 1)

        # Group run accuracies by this parameter's value, then average each group.
        grouped = {}
        for run in results:
            grouped.setdefault(run['params'][key], []).append(run['accuracy'])

        xs = sorted(grouped)
        ys = [np.mean(grouped[value]) for value in xs]

        plt.plot(xs, ys, 'o-', markersize=8)
        plt.title(f'{label}参数优化')
        plt.xlabel(label)
        plt.ylabel('准确率')
        plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig("eval_results/hyperparam_search_visualization.png")
    plt.close()

def analyze_category_performance(category_results):
    """Print a STEM / humanities / health / other accuracy breakdown and
    flag the weakest category as an optimization target.

    Args:
        category_results: mapping of category name -> accuracy in [0, 1].
            Missing categories are treated as 0 so group means stay comparable.
    """
    print("\n===== 类别表现分析 =====")
    stem_categories = ['math', 'physics', 'chemistry', 'biology', 'engineering', 'computer science']
    humanities = ['law', 'philosophy', 'psychology', 'history', 'business', 'economics']

    stem_acc = np.mean([category_results.get(c, 0) for c in stem_categories])
    humanities_acc = np.mean([category_results.get(c, 0) for c in humanities])
    other_acc = category_results.get('other', 0)
    health_acc = category_results.get('health', 0)

    print(f"STEM类别平均准确率: {stem_acc:.2%}")
    print(f"人文社科平均准确率: {humanities_acc:.2%}")
    print(f"健康类准确率: {health_acc:.2%}")
    print(f"其他类别准确率: {other_acc:.2%}")

    # Robustness fix: min() over an empty mapping raises ValueError; this is
    # reachable when every category evaluation failed upstream.
    if not category_results:
        return

    min_acc_category = min(category_results, key=category_results.get)
    print(f"\n建议优先优化的类别: {min_acc_category} ({category_results[min_acc_category]:.2%})")

if __name__ == "__main__":
    # Step 1: run the hyperparameter grid search on a small sample budget.
    print("=== 启动超参数优化 ===")
    best_params, best_accuracy = hyperparameter_search()
    
    # Step 2: re-evaluate with the best parameters on the full sample budget.
    # NOTE(review): the search never varies num_few_shot, so the .get fallback
    # of 3 is what actually applies here — confirm this is intended.
    print("\n=== 使用最佳参数进行全面评估 ===")
    final_accuracy, category_results, error_analysis = evaluate_mmlu_pro(
        num_few_shot=best_params.get("num_few_shot", 3),
        num_test_samples=20,
        temperature=best_params.get("temperature", 0.3),
        top_k=best_params.get("top_k", 20),
        repetition_penalty=best_params.get("repetition_penalty", 1.5),
        max_length=best_params.get("max_length", 600),
        save_results=True,
        verbose=True
    )
    
    print("\n===== MMLU-Pro 最终评估结果 =====")
    print(f"最佳参数: {best_params}")
    print(f"快速评估准确率: {best_accuracy:.2%}")
    print(f"完整评估准确率: {final_accuracy:.2%}")
    
    # Per-category breakdown (STEM vs humanities, weakest category).
    analyze_category_performance(category_results)
    
    # Error-type report; percentages are relative to the total error count.
    print("\n===== 错误类型分析 =====")
    total_errors = sum(error_analysis.values())
    if total_errors > 0:
        print(f"推理不完整: {error_analysis['reasoning_incomplete']} ({error_analysis['reasoning_incomplete']/total_errors:.2%})")
        print(f"知识缺失: {error_analysis['knowledge_gap']} ({error_analysis['knowledge_gap']/total_errors:.2%})")
        print(f"格式错误: {error_analysis['format_error']} ({error_analysis['format_error']/total_errors:.2%})")
        print(f"随机猜测: {error_analysis['random_guess']} ({error_analysis['random_guess']/total_errors:.2%})")
    else:
        print("无错误记录")
    
    # Persist a machine-readable summary of the whole run.
    summary = {
        "best_params": best_params,
        "quick_search_accuracy": best_accuracy,
        "full_evaluation_accuracy": final_accuracy,
        "category_performance": category_results,
        "error_analysis": error_analysis,
        "timestamp": datetime.now().isoformat()
    }
    
    os.makedirs("eval_results", exist_ok=True)
    with open("eval_results/final_summary.json", "w") as f:
        json.dump(summary, f, indent=2)
    
    print("\n评估已完成。详细结果保存在 eval_results 目录中。")