#!/usr/bin/env python3
"""
优化的重排模型评估工作流 - 逐个问题处理，实时保存和可视化

使用示例:

=== 完整运行模式 (run) ===
1. 从文件读取问题:
   python -m src.optimized_workflow --mode run --questions questions.csv --type file

2. 直接输入问题字符串:
   python -m src.optimized_workflow --mode run --questions "请解释什么是人工智能?" --type str

3. 自动判断模式（默认）:
   python -m src.optimized_workflow --questions questions.csv --type auto
   python -m src.optimized_workflow --questions "请解释什么是人工智能?" --type auto

=== 重新生成模式 (regenerate) ===
4. 从现有CSV重新生成指标:
   python -m src.optimized_workflow --mode regenerate --output-dir results_csv_question

5. 使用不同的评估参数重新生成:
   python -m src.optimized_workflow --mode regenerate --output-dir results_csv_question --eval-k 10 --threshold 0.7

6. 指定特定的CSV文件:
   python -m src.optimized_workflow --mode regenerate --csv-file path/to/reranking_results.csv --output-dir new_results
"""

import os
import csv
import json
import time
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from typing import List, Dict, Any, Optional

# 导入项目模块
from src.api_client import search_knowledge_chunks, evaluate_chunks_batch, call_bge_rerank_api, call_qwen_rerank_api
from src.evaluation import calculate_ndcg, calculate_average_relevance, calculate_hit_rate
from src.config import TOP_K, RELEVANCE_THRESHOLD

class OptimizedWorkflow:
    """
    Optimized workflow processor.

    Features:
    - Processes questions one at a time, saving results and rendering charts
      as it goes.
    - De-duplicates automatically: stale rows for a question are removed
      before its fresh results are appended.
    - Can regenerate metrics and charts from a previously written CSV.
    - Writes UTF-8 BOM ("utf-8-sig") CSVs so Excel opens them correctly.
    """

    def __init__(self, output_dir: str, top_k: int = 50):
        """
        Prepare the output tree and the incremental result files.

        Args:
            output_dir: directory that receives all CSVs, JSON and charts
            top_k: number of knowledge chunks retrieved per question
        """
        self.output_dir = output_dir
        self.top_k = top_k
        # TopK used when scoring the rerankers (taken from project config).
        self.eval_k = TOP_K

        # Make sure the output directory and its charts subfolder exist.
        for folder in (self.output_dir, os.path.join(self.output_dir, "visualizations")):
            os.makedirs(folder, exist_ok=True)

        # Intermediate result files, appended to as questions are processed.
        self.eval_results_file = os.path.join(self.output_dir, "evaluation_results.csv")
        self.rerank_results_file = os.path.join(self.output_dir, "reranking_results.csv")

        # Write CSV header rows for any file that does not exist yet.
        self._init_csv_files()

        print(f"🚀 优化工作流初始化完成")
        print(f"📁 输出目录: {self.output_dir}")
        print(f"🔍 检索TopK: {self.top_k}")
        print(f"📊 评估TopK: {self.eval_k}")
    
    def _init_csv_files(self):
        """初始化CSV文件的表头"""
        # 评估结果CSV表头
        if not os.path.exists(self.eval_results_file):
            with open(self.eval_results_file, 'w', newline='', encoding='utf-8-sig') as f:
                writer = csv.writer(f)
                writer.writerow(["问题", "知识块", "评分", "理由"])
        
        # 重排结果CSV表头
        if not os.path.exists(self.rerank_results_file):
            with open(self.rerank_results_file, 'w', newline='', encoding='utf-8-sig') as f:
                writer = csv.writer(f)
                writer.writerow([
                    "问题", "知识块索引", "知识块", "黄金标准评分", 
                    "BGE重排分数", "BGE排名", "Qwen重排分数", "Qwen排名"
                ])
    
    def read_questions(self, questions_input: str, input_type: str = "auto") -> List[str]:
        """
        读取问题列表
        
        参数:
            questions_input: 问题文件路径或单个问题字符串
            input_type: 输入类型，"file"=文件路径，"str"=问题字符串，"auto"=自动判断
            
        返回:
            问题列表
        """
        if input_type == "file":
            # 强制按文件处理
            if not os.path.isfile(questions_input):
                raise FileNotFoundError(f"指定的文件不存在: {questions_input}")
            # 读取CSV文件（支持UTF-8 BOM）
            df = pd.read_csv(questions_input, encoding='utf-8-sig')
            if "问题" in df.columns:
                questions = df["问题"].tolist()
            else:
                questions = df.iloc[:, 0].tolist()  # 使用第一列
        elif input_type == "str":
            # 强制按字符串处理
            questions = [questions_input]
        else:
            # 自动判断模式（保持原有逻辑）
            if os.path.isfile(questions_input):
                # 读取CSV文件（支持UTF-8 BOM）
                df = pd.read_csv(questions_input, encoding='utf-8-sig')
                if "问题" in df.columns:
                    questions = df["问题"].tolist()
                else:
                    questions = df.iloc[:, 0].tolist()  # 使用第一列
            else:
                # 单个问题
                questions = [questions_input]
        
        print(f"📋 读取到 {len(questions)} 个问题 (类型: {input_type})")
        return questions
    
    def process_single_question(self, question: str, question_idx: int, total_questions: int):
        """
        Run the full pipeline for one question: retrieve chunks, score them
        with the LLM, rerank with BGE and Qwen, persist all results, and
        render the per-question chart.

        Args:
            question: question text
            question_idx: zero-based index of the question in this run
            total_questions: total number of questions in this run

        Raises:
            Re-raises any exception after logging it; the caller decides
            whether to continue with the next question.
        """
        banner = "=" * 60
        print(f"\n{banner}")
        print(f"📝 处理问题 {question_idx + 1}/{total_questions}")
        print(f"❓ 问题: {question}")
        print(banner)
        print(f"🔍 检查并清理该问题的旧数据...")

        started = time.time()

        try:
            # Step 1: retrieve candidate knowledge chunks.
            print(f"🔍 步骤1: 检索知识块 (TopK={self.top_k})")
            passages = search_knowledge_chunks(question, self.top_k)
            print(f"✅ 成功检索到 {len(passages)} 个知识块")

            # Step 2: LLM assigns gold-standard relevance to every chunk.
            print(f"🤖 步骤2: LLM评分知识块")
            eval_results = evaluate_chunks_batch(question, passages)
            print(f"✅ 成功评估 {len(eval_results)} 个知识块")

            # eval_results holds (chunk, score, reason) triples.
            gold_scores = [score for _, score, _ in eval_results]
            reasons = [reason for _, _, reason in eval_results]

            # Persist LLM scores right away (append, replacing stale rows).
            self._append_eval_results(question, passages, gold_scores, reasons)

            # Step 3: reranker calls.
            print(f"🔄 步骤3: 调用重排模型")

            print(f"  🏷️ BGE重排...")
            bge_scores = call_bge_rerank_api(question, passages, verify_ssl=False, max_retries=3)
            bge_rankings = self._calculate_rankings(bge_scores)

            print(f"  🏷️ Qwen重排...")
            qwen_scores = call_qwen_rerank_api(question, passages, verify_ssl=False, max_retries=3)
            qwen_rankings = self._calculate_rankings(qwen_scores)

            print(f"✅ 重排完成")

            # Persist reranking scores/ranks (append, replacing stale rows).
            self._append_rerank_results(
                question, passages, gold_scores,
                bge_scores, bge_rankings, qwen_scores, qwen_rankings
            )

            # Step 4: per-question chart.
            print(f"📊 步骤4: 生成可视化图表")
            self._generate_question_visualization(
                question, question_idx, gold_scores,
                bge_scores, qwen_scores, bge_rankings, qwen_rankings
            )

            print(f"⏱️ 问题处理完成，耗时: {time.time() - started:.2f}秒")

        except Exception as e:
            print(f"❌ 处理问题时出错: {str(e)}")
            raise
    
    def _calculate_rankings(self, scores: List[float]) -> List[int]:
        """Return passage indices ordered from highest to lowest score.

        Ties keep their original index order (Python's sort is stable,
        including with reverse=True).
        """
        return sorted(range(len(scores)), key=scores.__getitem__, reverse=True)
    
    def _append_eval_results(self, question: str, passages: List[str], scores: List[float], reasons: List[str]):
        """追加评估结果到CSV文件，如果问题已存在则删除旧数据"""
        # 先删除该问题的旧数据（如果存在）
        self._remove_existing_question_data(self.eval_results_file, question)
        
        # 追加新数据
        with open(self.eval_results_file, 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            for passage, score, reason in zip(passages, scores, reasons):
                writer.writerow([question, passage, score, reason])
    
    def _append_rerank_results(self, question: str, passages: List[str], gold_scores: List[float],
                              bge_scores: List[float], bge_rankings: List[int],
                              qwen_scores: List[float], qwen_rankings: List[int]):
        """追加重排结果到CSV文件，如果问题已存在则删除旧数据"""
        # 先删除该问题的旧数据（如果存在）
        self._remove_existing_question_data(self.rerank_results_file, question)
        
        # 追加新数据
        with open(self.rerank_results_file, 'a', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            for i, passage in enumerate(passages):
                bge_rank = bge_rankings.index(i) if i in bge_rankings else -1
                qwen_rank = qwen_rankings.index(i) if i in qwen_rankings else -1
                
                writer.writerow([
                    question, i, passage, gold_scores[i],
                    bge_scores[i], bge_rank, qwen_scores[i], qwen_rank
                ])
    
    def _remove_existing_question_data(self, csv_file_path: str, question: str):
        """从CSV文件中删除指定问题的所有数据行"""
        if not os.path.exists(csv_file_path):
            return
        
        try:
            # 读取现有数据
            df = pd.read_csv(csv_file_path, encoding='utf-8-sig')
            
            # 检查是否有数据需要删除
            if '问题' not in df.columns or df.empty:
                return
            
            # 统计删除前的行数
            original_count = len(df)
            
            # 过滤掉指定问题的数据
            df_filtered = df[df['问题'] != question]
            
            # 统计删除的行数
            removed_count = original_count - len(df_filtered)
            
            if removed_count > 0:
                print(f"  🗑️ 删除问题 '{question}' 的 {removed_count} 条旧记录")
                
                # 重新写入文件（保持UTF-8 BOM编码）
                df_filtered.to_csv(csv_file_path, index=False, encoding='utf-8-sig')
            
        except Exception as e:
            print(f"  ⚠️ 删除旧数据时出错: {str(e)}，继续追加新数据")
    
    def _generate_question_visualization(self, question: str, question_idx: int,
                                       gold_scores: List[float], bge_scores: List[float], qwen_scores: List[float],
                                       bge_rankings: List[int], qwen_rankings: List[int]):
        """
        Render a two-panel chart for one question and save it as a PNG under
        <output_dir>/visualizations/.

        Left panel: mean BGE/Qwen score per gold-relevance bucket, against
        the ideal (bucket midpoint) dashed line.
        Right panel: NDCG / average relevance / hit-rate bars for both models,
        computed over the top-eval_k passages of each ranking.

        Args:
            question: question text (sanitized into the output file name)
            question_idx: zero-based index (used in title and file name)
            gold_scores: LLM gold-standard relevance per passage
            bge_scores, qwen_scores: reranker scores per passage
            bge_rankings, qwen_rankings: passage indices sorted best-first
        """
        # Fall back through several fonts so CJK labels render on most systems.
        plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'Arial Unicode MS', 'DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))

        # --- Panel 1: score distribution by gold-relevance bucket ---
        relevance_labels = ['0-0.2', '0.2-0.4', '0.4-0.6', '0.6-0.8', '0.8-1.0']

        bge_by_group = [[] for _ in range(5)]
        qwen_by_group = [[] for _ in range(5)]

        # Bucket the model scores by the gold score (0-1 mapped onto 5 bins).
        for i, gold_score in enumerate(gold_scores):
            bin_idx = min(int(gold_score * 5), 4)  # clamp gold_score == 1.0 into the last bin
            bge_by_group[bin_idx].append(bge_scores[i])
            qwen_by_group[bin_idx].append(qwen_scores[i])

        # Mean model score per bucket; the "ideal" line is the bucket midpoint.
        bge_avg = []
        qwen_avg = []
        gold_avg = []

        for i in range(5):
            if bge_by_group[i]:
                bge_avg.append(sum(bge_by_group[i]) / len(bge_by_group[i]))
                qwen_avg.append(sum(qwen_by_group[i]) / len(qwen_by_group[i]))
                gold_avg.append((i * 0.2) + 0.1)  # midpoints: 0.1, 0.3, 0.5, 0.7, 0.9
            else:
                # Empty bucket: plot zero so the lines stay continuous.
                bge_avg.append(0)
                qwen_avg.append(0)
                gold_avg.append((i * 0.2) + 0.1)

        x = np.arange(len(relevance_labels))

        ax1.plot(x, bge_avg, marker='o', linewidth=2, label='BGE重排', color='blue')
        ax1.plot(x, qwen_avg, marker='s', linewidth=2, label='Qwen重排', color='red')
        ax1.plot(x, gold_avg, marker='x', linestyle='--', linewidth=1, label='理想分数', color='gold')

        # Value labels: BGE above the point, Qwen below, to reduce overlap.
        for i in range(len(x)):
            ax1.text(x[i], bge_avg[i] + 0.02, f"{bge_avg[i]:.2f}", ha='center', va='bottom', fontsize=9)
            ax1.text(x[i], qwen_avg[i] - 0.02, f"{qwen_avg[i]:.2f}", ha='center', va='top', fontsize=9)

        ax1.set_xticks(x)
        ax1.set_xticklabels(relevance_labels)
        ax1.set_xlabel('黄金标准相关性得分范围', fontsize=11)
        ax1.set_ylabel('平均模型分数', fontsize=11)
        ax1.set_title(f'分数分布对比 - 问题{question_idx + 1}', fontsize=12, fontweight='bold')
        ax1.set_ylim(0, 1.1)  # scores live in [0, 1]; headroom for labels
        ax1.grid(True, linestyle='--', alpha=0.7)
        ax1.legend()

        # --- Panel 2: evaluation metric comparison ---
        top_k_eval = min(self.eval_k, len(gold_scores))
        bge_top_scores = [gold_scores[idx] for idx in bge_rankings[:top_k_eval]]
        qwen_top_scores = [gold_scores[idx] for idx in qwen_rankings[:top_k_eval]]

        bge_ndcg = calculate_ndcg(bge_top_scores, gold_scores, top_k_eval)
        qwen_ndcg = calculate_ndcg(qwen_top_scores, gold_scores, top_k_eval)

        bge_avg_rel = calculate_average_relevance(bge_top_scores, top_k_eval)
        qwen_avg_rel = calculate_average_relevance(qwen_top_scores, top_k_eval)

        bge_hit_rate = calculate_hit_rate(bge_top_scores, RELEVANCE_THRESHOLD, top_k_eval)
        qwen_hit_rate = calculate_hit_rate(qwen_top_scores, RELEVANCE_THRESHOLD, top_k_eval)

        # Label NDCG with the actual cutoff (was hard-coded '@5'), matching
        # the regenerate-mode chart.
        metrics = [f'NDCG@{top_k_eval}', '平均相关性', '命中率']
        bge_values = [bge_ndcg, bge_avg_rel, bge_hit_rate]
        qwen_values = [qwen_ndcg, qwen_avg_rel, qwen_hit_rate]

        x_pos = range(len(metrics))
        width = 0.35
        ax2.bar([p - width/2 for p in x_pos], bge_values, width, label='BGE', color='blue', alpha=0.7)
        ax2.bar([p + width/2 for p in x_pos], qwen_values, width, label='Qwen', color='red', alpha=0.7)
        ax2.set_xlabel('评估指标', fontsize=11)
        ax2.set_ylabel('数值', fontsize=11)
        ax2.set_title('评估指标对比', fontsize=12, fontweight='bold')
        ax2.set_xticks(x_pos)
        ax2.set_xticklabels(metrics)
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # Numeric labels above each bar.
        for i, (bge_val, qwen_val) in enumerate(zip(bge_values, qwen_values)):
            ax2.text(i - width/2, bge_val + 0.01, f'{bge_val:.3f}', ha='center', va='bottom', fontsize=9)
            ax2.text(i + width/2, qwen_val + 0.01, f'{qwen_val:.3f}', ha='center', va='bottom', fontsize=9)

        # Headroom above the tallest bar for its label.
        ax2.set_ylim(0, max(max(bge_values), max(qwen_values)) * 1.15)

        plt.tight_layout()

        # Sanitize the question into a filesystem-safe, truncated name.
        safe_question = "".join(c for c in question if c.isalnum() or c in (' ', '-', '_')).rstrip()[:50]
        output_path = os.path.join(self.output_dir, "visualizations", f"question_{question_idx + 1:03d}_{safe_question}.png")
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"📊 可视化图表已保存: {output_path}")
    
    def calculate_final_metrics(self):
        """
        Aggregate the reranking CSV into per-question and averaged metrics
        for both rerankers, print a comparison table, render the summary
        chart, and dump everything to final_evaluation_results.json.

        Returns:
            dict: {"BGE": {...}, "Qwen": {...}} with keys avg_ndcg,
            avg_relevance, avg_hit_rate.
        """
        print(f"\n{'='*60}")
        print(f"📊 计算最终评估指标")
        print(f"{'='*60}")

        # Reranking rows written incrementally by process_single_question.
        df = pd.read_csv(self.rerank_results_file, encoding='utf-8-sig')

        # One metrics dict per question per model; both models share the loop
        # (the BGE/Qwen bodies were previously duplicated).
        results = {"BGE": [], "Qwen": []}
        score_columns = {"BGE": 'BGE重排分数', "Qwen": 'Qwen重排分数'}
        questions = df['问题'].unique()

        for question in questions:
            question_data = df[df['问题'] == question]
            gold_scores = question_data['黄金标准评分'].tolist()

            for model, column in score_columns.items():
                model_scores = question_data[column].tolist()
                rankings = self._calculate_rankings(model_scores)
                # Gold scores of the top-eval_k passages in the model's order.
                reranked = [gold_scores[idx] for idx in rankings[:self.eval_k]]

                results[model].append({
                    "question": question,
                    "ndcg": calculate_ndcg(reranked, gold_scores, self.eval_k),
                    "avg_relevance": calculate_average_relevance(reranked, self.eval_k),
                    "hit_rate": calculate_hit_rate(reranked, RELEVANCE_THRESHOLD, self.eval_k)
                })

        # Mean of each metric over all questions.
        avg_results = {}
        for model in ["BGE", "Qwen"]:
            avg_results[model] = {
                "avg_ndcg": np.mean([r["ndcg"] for r in results[model]]),
                "avg_relevance": np.mean([r["avg_relevance"] for r in results[model]]),
                "avg_hit_rate": np.mean([r["hit_rate"] for r in results[model]])
            }

        print(f"\n📈 平均评估指标 (Top{self.eval_k}):")
        print(f"{'指标':<15} {'BGE':<12} {'Qwen':<12} {'提升':<12}")
        print("-" * 55)

        # Label NDCG with the actual eval cutoff (was hard-coded "@5").
        metrics_names = [
            (f"NDCG@{self.eval_k}", "avg_ndcg"),
            ("平均相关性", "avg_relevance"),
            ("命中率", "avg_hit_rate")
        ]

        for display_name, key in metrics_names:
            bge_val = avg_results["BGE"][key]
            qwen_val = avg_results["Qwen"][key]
            improvement = qwen_val - bge_val
            print(f"{display_name:<15} {bge_val:<12.4f} {qwen_val:<12.4f} {improvement:<12.4f}")

        # Run-level chart for the averaged metrics.
        self._generate_summary_visualization(results, avg_results)

        # Persist detailed + averaged results together with the parameters used.
        output_file = os.path.join(self.output_dir, "final_evaluation_results.json")
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump({
                "detailed_results": results,
                "average_results": avg_results,
                "config": {
                    "eval_top_k": self.eval_k,
                    "retrieval_top_k": self.top_k,
                    "relevance_threshold": RELEVANCE_THRESHOLD
                }
            }, f, ensure_ascii=False, indent=2)

        print(f"\n📄 详细结果已保存: {output_file}")

        return avg_results
    
    def regenerate_metrics_from_csv(self, csv_file_path: str = None, new_eval_k: int = None, new_threshold: float = None):
        """
        Recompute evaluation metrics and visualizations from an existing
        reranking_results.csv, optionally under new evaluation parameters.

        Args:
            csv_file_path: source CSV; defaults to this run's reranking file
            new_eval_k: replacement evaluation TopK (kept as-is if None)
            new_threshold: replacement relevance threshold (kept as-is if None)

        Returns:
            dict: averaged metrics per model (same shape as
            calculate_final_metrics).

        Raises:
            FileNotFoundError: if the CSV does not exist.
            ValueError: if required columns are missing.
            Exception: if the CSV cannot be parsed.
        """
        print(f"\n{'='*60}")
        print(f"📊 基于CSV文件重新生成评估指标")
        print(f"{'='*60}")

        if csv_file_path is None:
            csv_file_path = self.rerank_results_file

        if not os.path.exists(csv_file_path):
            raise FileNotFoundError(f"CSV文件不存在: {csv_file_path}")

        if new_eval_k is not None:
            old_eval_k = self.eval_k
            self.eval_k = new_eval_k
            print(f"📝 评估TopK从 {old_eval_k} 更新为 {new_eval_k}")

        if new_threshold is not None:
            # Rebinds this module's RELEVANCE_THRESHOLD (imported from
            # src.config), so every function in this module sees the new
            # value. NOTE(review): src.config itself is not updated — other
            # modules reading it directly keep the old threshold.
            global RELEVANCE_THRESHOLD
            old_threshold = RELEVANCE_THRESHOLD
            RELEVANCE_THRESHOLD = new_threshold
            print(f"📝 相关性阈值从 {old_threshold} 更新为 {new_threshold}")

        print(f"📂 读取CSV文件: {csv_file_path}")

        try:
            df = pd.read_csv(csv_file_path, encoding='utf-8-sig')
            print(f"✅ 成功读取 {len(df)} 行数据")
        except Exception as e:
            raise Exception(f"读取CSV文件失败: {str(e)}")

        # Fail fast if the CSV does not look like a reranking results file.
        required_columns = ["问题", "黄金标准评分", "BGE重排分数", "Qwen重排分数"]
        missing_columns = [col for col in required_columns if col not in df.columns]
        if missing_columns:
            raise ValueError(f"CSV文件缺少必要的列: {missing_columns}")

        # One metrics dict per question per model; both models share the loop.
        results = {"BGE": [], "Qwen": []}
        score_columns = {"BGE": 'BGE重排分数', "Qwen": 'Qwen重排分数'}
        questions = df['问题'].unique()

        print(f"🔄 重新计算 {len(questions)} 个问题的指标...")

        for i, question in enumerate(questions):
            print(f"  处理问题 {i+1}/{len(questions)}: {question[:50]}...")

            question_data = df[df['问题'] == question]
            gold_scores = question_data['黄金标准评分'].tolist()

            scores_by_model = {}
            rankings_by_model = {}
            for model, column in score_columns.items():
                model_scores = question_data[column].tolist()
                rankings = self._calculate_rankings(model_scores)
                reranked = [gold_scores[idx] for idx in rankings[:self.eval_k]]
                scores_by_model[model] = model_scores
                rankings_by_model[model] = rankings

                results[model].append({
                    "question": question,
                    "ndcg": calculate_ndcg(reranked, gold_scores, self.eval_k),
                    "avg_relevance": calculate_average_relevance(reranked, self.eval_k),
                    "hit_rate": calculate_hit_rate(reranked, RELEVANCE_THRESHOLD, self.eval_k)
                })

            # Re-render the per-question chart from the saved data.
            self._generate_question_visualization_from_data(
                question, i, gold_scores,
                scores_by_model["BGE"], scores_by_model["Qwen"],
                rankings_by_model["BGE"], rankings_by_model["Qwen"]
            )

        # Mean of each metric over all questions.
        avg_results = {}
        for model in ["BGE", "Qwen"]:
            avg_results[model] = {
                "avg_ndcg": np.mean([r["ndcg"] for r in results[model]]),
                "avg_relevance": np.mean([r["avg_relevance"] for r in results[model]]),
                "avg_hit_rate": np.mean([r["hit_rate"] for r in results[model]])
            }

        print(f"\n📈 重新计算的平均评估指标 (Top{self.eval_k}):")
        print(f"{'指标':<15} {'BGE':<12} {'Qwen':<12} {'差值':<12}")
        print("-" * 55)

        # Label NDCG with the actual eval cutoff (was hard-coded "@5").
        metrics_names = [
            (f"NDCG@{self.eval_k}", "avg_ndcg"),
            ("平均相关性", "avg_relevance"),
            ("命中率", "avg_hit_rate")
        ]

        for display_name, key in metrics_names:
            bge_val = avg_results["BGE"][key]
            qwen_val = avg_results["Qwen"][key]
            diff = qwen_val - bge_val
            print(f"{display_name:<15} {bge_val:<12.4f} {qwen_val:<12.4f} {diff:<12.4f}")

        # New summary chart reflecting the (possibly updated) parameters.
        self._generate_summary_visualization(results, avg_results)

        # Persist the regenerated results with provenance information.
        output_file = os.path.join(self.output_dir, "regenerated_evaluation_results.json")
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump({
                "detailed_results": results,
                "average_results": avg_results,
                "config": {
                    "eval_top_k": self.eval_k,
                    "retrieval_top_k": self.top_k,
                    "relevance_threshold": RELEVANCE_THRESHOLD,
                    "regenerated": True,
                    "source_csv": csv_file_path
                }
            }, f, ensure_ascii=False, indent=2)

        print(f"\n📄 重新生成的结果已保存: {output_file}")
        print(f"📊 可视化图表已更新")

        return avg_results

    def _generate_question_visualization_from_data(self, question: str, question_idx: int,
                                                  gold_scores: List[float], bge_scores: List[float], qwen_scores: List[float],
                                                  bge_rankings: List[int], qwen_rankings: List[int]):
        """Re-render the two-panel chart for one question from saved CSV data
        (regenerate mode only); output files are prefixed "regenerated_".

        Left panel: mean BGE/Qwen score per gold-relevance bucket vs the
        ideal bucket-midpoint dashed line. Right panel: NDCG / average
        relevance / hit-rate bars for both models over the top-eval_k
        passages.
        """
        # Fall back through several fonts so CJK labels render on most systems.
        plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'Arial Unicode MS', 'DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False
        
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
        
        # Panel 1: score-distribution line chart.
        relevance_labels = ['0-0.2', '0.2-0.4', '0.4-0.6', '0.6-0.8', '0.8-1.0']
        
        bge_by_group = [[] for _ in range(5)]
        qwen_by_group = [[] for _ in range(5)]
        
        # Bucket each passage's model scores by its gold score
        # (5 bins; gold_score == 1.0 is clamped into the last bin).
        for i, gold_score in enumerate(gold_scores):
            bin_idx = min(int(gold_score * 5), 4)
            bge_by_group[bin_idx].append(bge_scores[i])
            qwen_by_group[bin_idx].append(qwen_scores[i])
        
        bge_avg = []
        qwen_avg = []
        gold_avg = []
        
        # Mean score per bucket; empty buckets plot as 0 so lines stay continuous.
        for i in range(5):
            if bge_by_group[i]:
                bge_avg.append(sum(bge_by_group[i]) / len(bge_by_group[i]))
                qwen_avg.append(sum(qwen_by_group[i]) / len(qwen_by_group[i]))
                gold_avg.append((i * 0.2) + 0.1)  # bucket midpoints: 0.1 .. 0.9
            else:
                bge_avg.append(0)
                qwen_avg.append(0)
                gold_avg.append((i * 0.2) + 0.1)
        
        x = np.arange(len(relevance_labels))
        ax1.plot(x, bge_avg, marker='o', linewidth=2, label='BGE重排', color='blue')
        ax1.plot(x, qwen_avg, marker='s', linewidth=2, label='Qwen重排', color='red')
        ax1.plot(x, gold_avg, marker='x', linestyle='--', linewidth=1, label='理想分数', color='gold')
        
        # Value labels: BGE above the point, Qwen below, to reduce overlap.
        for i in range(len(x)):
            ax1.text(x[i], bge_avg[i] + 0.02, f"{bge_avg[i]:.2f}", ha='center', va='bottom', fontsize=9)
            ax1.text(x[i], qwen_avg[i] - 0.02, f"{qwen_avg[i]:.2f}", ha='center', va='top', fontsize=9)
        
        ax1.set_xticks(x)
        ax1.set_xticklabels(relevance_labels)
        ax1.set_xlabel('黄金标准相关性得分范围', fontsize=11)
        ax1.set_ylabel('平均模型分数', fontsize=11)
        ax1.set_title(f'分数分布对比 - 问题{question_idx + 1} [重新生成]', fontsize=12, fontweight='bold')
        ax1.set_ylim(0, 1.1)
        ax1.grid(True, linestyle='--', alpha=0.7)
        ax1.legend()
        
        # Panel 2: evaluation metric comparison over the top-eval_k passages.
        top_k_eval = min(self.eval_k, len(gold_scores))
        bge_top_scores = [gold_scores[idx] for idx in bge_rankings[:top_k_eval]]
        qwen_top_scores = [gold_scores[idx] for idx in qwen_rankings[:top_k_eval]]
        
        bge_ndcg = calculate_ndcg(bge_top_scores, gold_scores, top_k_eval)
        qwen_ndcg = calculate_ndcg(qwen_top_scores, gold_scores, top_k_eval)
        
        bge_avg_rel = calculate_average_relevance(bge_top_scores, top_k_eval)
        qwen_avg_rel = calculate_average_relevance(qwen_top_scores, top_k_eval)
        
        bge_hit_rate = calculate_hit_rate(bge_top_scores, RELEVANCE_THRESHOLD, top_k_eval)
        qwen_hit_rate = calculate_hit_rate(qwen_top_scores, RELEVANCE_THRESHOLD, top_k_eval)
        
        metrics = [f'NDCG@{top_k_eval}', '平均相关性', '命中率']
        bge_values = [bge_ndcg, bge_avg_rel, bge_hit_rate]
        qwen_values = [qwen_ndcg, qwen_avg_rel, qwen_hit_rate]
        
        x_pos = range(len(metrics))
        width = 0.35
        ax2.bar([p - width/2 for p in x_pos], bge_values, width, label='BGE', color='blue', alpha=0.7)
        ax2.bar([p + width/2 for p in x_pos], qwen_values, width, label='Qwen', color='red', alpha=0.7)
        ax2.set_xlabel('评估指标', fontsize=11)
        ax2.set_ylabel('数值', fontsize=11)
        ax2.set_title('评估指标对比 [重新生成]', fontsize=12, fontweight='bold')
        ax2.set_xticks(x_pos)
        ax2.set_xticklabels(metrics)
        ax2.legend()
        ax2.grid(True, alpha=0.3)
        
        # Numeric labels above each bar.
        for i, (bge_val, qwen_val) in enumerate(zip(bge_values, qwen_values)):
            ax2.text(i - width/2, bge_val + 0.01, f'{bge_val:.3f}', ha='center', va='bottom', fontsize=9)
            ax2.text(i + width/2, qwen_val + 0.01, f'{qwen_val:.3f}', ha='center', va='bottom', fontsize=9)
        
        # Headroom above the tallest bar for its label.
        ax2.set_ylim(0, max(max(bge_values), max(qwen_values)) * 1.15)
        
        plt.tight_layout()
        
        # Save under a filesystem-safe, truncated question name.
        safe_question = "".join(c for c in question if c.isalnum() or c in (' ', '-', '_')).rstrip()[:50]
        output_path = os.path.join(self.output_dir, "visualizations", f"regenerated_question_{question_idx + 1:03d}_{safe_question}.png")
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        plt.close()
        
        print(f"    📊 可视化图表已保存: {output_path}")
    
    def _generate_summary_visualization(self, results: Dict, avg_results: Dict):
        """
        Render the run-level summary chart (averaged NDCG / relevance /
        hit-rate bars for BGE vs Qwen) to summary_evaluation_results.png.

        Args:
            results: per-question metric dicts per model (not plotted here;
                kept for interface stability)
            avg_results: averaged metrics per model, keys avg_ndcg,
                avg_relevance, avg_hit_rate
        """
        # Fall back through several fonts so CJK labels render on most systems.
        plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'Arial Unicode MS', 'DejaVu Sans']
        plt.rcParams['axes.unicode_minus'] = False

        fig, ax = plt.subplots(1, 1, figsize=(10, 6))

        # Label NDCG with the actual eval cutoff (was hard-coded '@5'),
        # consistent with the per-question charts.
        metrics = [f'NDCG@{self.eval_k}', '平均相关性', '命中率']
        bge_values = [avg_results["BGE"]["avg_ndcg"], avg_results["BGE"]["avg_relevance"], avg_results["BGE"]["avg_hit_rate"]]
        qwen_values = [avg_results["Qwen"]["avg_ndcg"], avg_results["Qwen"]["avg_relevance"], avg_results["Qwen"]["avg_hit_rate"]]

        x_pos = range(len(metrics))
        width = 0.35
        ax.bar([p - width/2 for p in x_pos], bge_values, width, label='BGE', color='blue', alpha=0.7)
        ax.bar([p + width/2 for p in x_pos], qwen_values, width, label='Qwen', color='red', alpha=0.7)
        ax.set_xlabel('评估指标', fontsize=12)
        ax.set_ylabel('数值', fontsize=12)
        ax.set_title('平均评估指标对比', fontsize=14, fontweight='bold')
        ax.set_xticks(x_pos)
        ax.set_xticklabels(metrics)
        ax.legend(fontsize=11)
        ax.grid(True, alpha=0.3)

        # Numeric labels above each bar.
        for i, (bge_val, qwen_val) in enumerate(zip(bge_values, qwen_values)):
            ax.text(i - width/2, bge_val + 0.01, f'{bge_val:.3f}', ha='center', va='bottom', fontsize=10)
            ax.text(i + width/2, qwen_val + 0.01, f'{qwen_val:.3f}', ha='center', va='bottom', fontsize=10)

        # Headroom above the tallest bar for its label.
        ax.set_ylim(0, max(max(bge_values), max(qwen_values)) * 1.15)

        plt.tight_layout()

        output_path = os.path.join(self.output_dir, "summary_evaluation_results.png")
        plt.savefig(output_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"📊 汇总图表已保存: {output_path}")
    
    def run(self, questions_input: str, input_type: str = "auto"):
        """
        Execute the complete workflow: read questions, process each one
        (retrieve -> LLM score -> rerank -> chart), then compute the final
        aggregate metrics.

        Args:
            questions_input: question file path or a single question string
            input_type: "file" = path, "str" = literal question, "auto" = guess
        """
        start_time = time.time()

        questions = self.read_questions(questions_input, input_type)

        # Process one question at a time; a failure skips only that question.
        for i, question in enumerate(questions):
            try:
                self.process_single_question(question, i, len(questions))
            except Exception as e:
                print(f"❌ 处理问题 {i+1} 失败: {str(e)}")
                continue

        # Aggregate metrics once everything has been written.
        # (Previously the return value was stored in an unused local.)
        if questions:
            self.calculate_final_metrics()

        total_time = time.time() - start_time
        print(f"\n🎉 优化工作流完成！")
        print(f"⏱️ 总耗时: {total_time:.2f}秒")
        print(f"📁 结果保存在: {self.output_dir}")


def main():
    """CLI entry point: parse arguments, build the workflow, dispatch by mode."""
    parser = argparse.ArgumentParser(description="优化的重排模型评估工作流")

    # Mode selection: full pipeline vs metric regeneration from CSV.
    parser.add_argument("--mode", type=str, choices=["run", "regenerate"],
                       default="run", help="运行模式: run=完整流程, regenerate=从CSV重新生成指标")

    # run-mode arguments.
    parser.add_argument("--questions", type=str,
                       help="问题文件路径或单个问题字符串（run模式必需）")
    parser.add_argument("--type", type=str, choices=["file", "str", "auto"],
                       default="auto", help="输入类型: file=文件路径, str=问题字符串, auto=自动判断")
    parser.add_argument("--output-dir", type=str, default="results_optimized",
                       help="输出目录")
    parser.add_argument("--top-k", type=int, default=50,
                       help="每个问题检索的知识块数量")

    # regenerate-mode arguments.
    parser.add_argument("--csv-file", type=str,
                       help="CSV文件路径（regenerate模式）")
    parser.add_argument("--eval-k", type=int,
                       help="新的评估TopK值（regenerate模式）")
    parser.add_argument("--threshold", type=float,
                       help="新的相关性阈值（regenerate模式）")

    args = parser.parse_args()

    workflow = OptimizedWorkflow(output_dir=args.output_dir, top_k=args.top_k)

    if args.mode == "run":
        # Full pipeline needs a question source.
        if not args.questions:
            parser.error("run模式需要指定--questions参数")
        workflow.run(args.questions, args.type)
    elif args.mode == "regenerate":
        print("🔄 重新生成模式")
        workflow.regenerate_metrics_from_csv(
            csv_file_path=args.csv_file,
            new_eval_k=args.eval_k,
            new_threshold=args.threshold
        )


# Script entry point (also runnable via `python -m src.optimized_workflow`).
if __name__ == "__main__":
    main()