"""
重排模型评估工具主函数 - 支持带分数的新格式
"""
import argparse
import json
import os
from typing import Dict, List, Union, Any
import pandas as pd
import numpy as np

# 导入路径
from src.config import MODELS, TOP_K, RELEVANCE_THRESHOLD
from src.evaluation import evaluate_reranking_model
from src.visualization import plot_metric_comparison, plot_score_distribution

def load_data(data_path: str):
    """Load evaluation data from a JSON file.

    Args:
        data_path: Path to the JSON data file.

    Returns:
        The deserialized JSON content (typically a dict keyed by question ID).
    """
    with open(data_path, 'r', encoding='utf-8') as handle:
        raw_text = handle.read()
    return json.loads(raw_text)

def print_separator(title=""):
    """Print an 80-column separator; with a title, a double-rule banner with the title centered."""
    width = 80
    if not title:
        print("\n" + "-" * width)
        return
    bar = "=" * width
    print("\n" + bar)
    print(f"{title:^{width}}")
    print(bar)

def print_dict_sample(data, name="数据", max_items=2):
    """Print up to *max_items* key/value pairs of a dict (or the repr of any other value), plus a total count."""
    print(f"\n{name}示例 (展示前{max_items}项):")
    if isinstance(data, dict):
        for key, value in list(data.items())[:max_items]:
            print(f"  - {key}: {value}")
    else:
        print(f"  {data}")
    print(f"共有{len(data)}项数据")

def main() -> None:
    """CLI entry point.

    Loads gold-standard scores plus BGE and Qwen rerank results from JSON
    files, evaluates both models, prints a metric summary, saves results to
    JSON, generates comparison charts, and optionally analyzes bad cases.
    """
    parser = argparse.ArgumentParser(description='重排模型评估工具')
    parser.add_argument('--data', type=str, required=True, help='包含问题、知识片段和黄金标准评分的JSON数据文件路径')
    parser.add_argument('--bge-scores', type=str, required=True, help='BGE重排模型分数结果的JSON文件路径(包含scores和rankings)')
    parser.add_argument('--qwen-scores', type=str, required=True, help='Qwen重排模型分数结果的JSON文件路径(包含scores和rankings)')
    parser.add_argument('--output-dir', type=str, default='results', help='输出结果和图表的目录')
    parser.add_argument('--bad-cases', type=str, help='Bad Case列表文件路径（可选）')
    parser.add_argument('--verbose', '-v', action='store_true', help='是否显示详细信息')
    
    args = parser.parse_args()
    verbose = args.verbose
    
    print_separator("重排模型评估工具")
    print(f"评估参数:")
    print(f"  - 评估Top-K: {TOP_K}")
    print(f"  - 相关性阈值: {RELEVANCE_THRESHOLD} (大于等于此值被视为相关)")
    
    # Create the output directory (no error if it already exists)
    os.makedirs(args.output_dir, exist_ok=True)
    print(f"\n输出目录: {os.path.abspath(args.output_dir)}")
    
    # Load gold-standard data.
    # Expected per-question schema: {'question': str, 'passages': [...],
    # 'gold_scores': [...]} — inferred from the accesses below; confirm with producer.
    print_separator("数据加载阶段")
    print("加载黄金标准数据...")
    data = load_data(args.data)
    if verbose:
        # Print one sample entry for a quick sanity check
        sample_q_id = list(data.keys())[0]
        print(f"\n黄金标准数据示例 (问题ID: {sample_q_id}):")
        print(f"  问题: {data[sample_q_id]['question']}")
        print(f"  知识片段数量: {len(data[sample_q_id].get('passages', []))}")
        print(f"  黄金标准得分: {data[sample_q_id]['gold_scores'][:5]}... (共{len(data[sample_q_id]['gold_scores'])}个)")
    
    # Load model results; each entry must contain 'rankings' and 'scores' lists
    print("\n加载BGE重排模型结果...")
    bge_results = load_data(args.bge_scores)
    if verbose:
        sample_q_id = list(bge_results.keys())[0]
        print(f"\nBGE重排结果示例 (问题ID: {sample_q_id}):")
        print(f"  排名: {bge_results[sample_q_id]['rankings'][:5]}... (共{len(bge_results[sample_q_id]['rankings'])}个)")
        print(f"  分数: {[round(s, 2) for s in bge_results[sample_q_id]['scores'][:5]]}... (共{len(bge_results[sample_q_id]['scores'])}个)")
    
    print("\n加载Qwen重排模型结果...")
    qwen_results = load_data(args.qwen_scores)
    if verbose:
        sample_q_id = list(qwen_results.keys())[0]
        print(f"\nQwen重排结果示例 (问题ID: {sample_q_id}):")
        print(f"  排名: {qwen_results[sample_q_id]['rankings'][:5]}... (共{len(qwen_results[sample_q_id]['rankings'])}个)")
        print(f"  分数: {[round(s, 2) for s in qwen_results[sample_q_id]['scores'][:5]]}... (共{len(qwen_results[sample_q_id]['scores'])}个)")
    
    # Extract gold-standard scores and question texts keyed by question ID
    gold_standard_scores = {q_id: data[q_id]['gold_scores'] for q_id in data}
    questions = {q_id: data[q_id]['question'] for q_id in data}
    
    # Evaluate both models against the gold standard
    print_separator("模型评估阶段")
    print("评估BGE重排模型...")
    bge_eval_results = evaluate_reranking_model(gold_standard_scores, bge_results, TOP_K, verbose=verbose)
    
    print("\n评估Qwen重排模型...")
    qwen_eval_results = evaluate_reranking_model(gold_standard_scores, qwen_results, TOP_K, verbose=verbose)
    
    # Merge per-model results under fixed model-name keys
    model_eval_results = {
        'bge_rerank': bge_eval_results,
        'qwen3_4b': qwen_eval_results
    }
    
    # Print an evaluation summary.
    # NOTE(review): metric keys are hardcoded to @5 while TOP_K comes from
    # config — confirm these stay in sync when TOP_K changes.
    print_separator("评估结果摘要")
    metrics = ['ndcg@5', 'avg_relevance@5', 'hit_rate@5']
    
    # Average each metric over all questions per model
    # (assumes eval results map question_id -> {metric_name: value} — verify
    # against evaluate_reranking_model)
    avg_metrics = {}
    for model_name, model_data in model_eval_results.items():
        avg_metrics[model_name] = {}
        for metric in metrics:
            values = []
            for q_id, q_metrics in model_data.items():
                if metric in q_metrics:
                    values.append(q_metrics[metric])
            if values:
                avg_metrics[model_name][metric] = sum(values) / len(values)
            else:
                # No question produced this metric; report 0.0 rather than fail
                avg_metrics[model_name][metric] = 0.0
    
    # Print the averaged metrics as an aligned table
    print("模型平均指标:")
    print(f"{'指标':<20} {'BGE重排':<15} {'Qwen重排':<15}")
    print("-" * 50)
    for metric in metrics:
        bge_value = avg_metrics['bge_rerank'][metric]
        qwen_value = avg_metrics['qwen3_4b'][metric]
        print(f"{metric:<20} {bge_value:<15.4f} {qwen_value:<15.4f}")
    
    # In verbose mode, also print per-question metrics
    if verbose:
        print("\n每个问题的详细指标:")
        for q_id in gold_standard_scores.keys():
            print(f"\n问题: {questions[q_id]}")
            print(f"{'指标':<20} {'BGE重排':<15} {'Qwen重排':<15}")
            print("-" * 50)
            for metric in metrics:
                bge_value = bge_eval_results[q_id].get(metric, 0)
                qwen_value = qwen_eval_results[q_id].get(metric, 0)
                print(f"{metric:<20} {bge_value:<15.4f} {qwen_value:<15.4f}")
    
    # Persist the full evaluation results as JSON
    result_path = os.path.join(args.output_dir, 'evaluation_results.json')
    with open(result_path, 'w', encoding='utf-8') as f:
        json.dump(model_eval_results, f, ensure_ascii=False, indent=2)
    print(f"\n评估结果已保存到: {result_path}")
    
    # Generate per-metric comparison charts
    print_separator("可视化生成阶段")
    print("生成模型对比图表...")
    
    for metric in metrics:
        # Re-shape model_eval_results to fit the visualization function's input
        adapted_results = {}
        for model_name, model_data in model_eval_results.items():
            # Per model: a flat dict mapping question ID -> this metric's value
            adapted_results[model_name] = {}
            for question_id, metrics_dict in model_data.items():
                if metric in metrics_dict:
                    adapted_results[model_name][question_id] = metrics_dict[metric]
        
        output_path = os.path.join(args.output_dir, f'{metric}_comparison.png')
        plot_metric_comparison(
            adapted_results,
            metric, 
            output_path=output_path
        )
        print(f"  - 已生成 {metric} 对比图: {output_path}")
    
    # Optional bad-case analysis (only when --bad-cases is given)
    if args.bad_cases:
        print_separator("Bad Case分析")
        with open(args.bad_cases, 'r', encoding='utf-8') as f:
            # Presumably a JSON list of question IDs — verify file format
            bad_case_ids = json.load(f)
        
        print(f"分析 {len(bad_case_ids)} 个Bad Cases...")
        
        for case_id in bad_case_ids:
            if case_id in gold_standard_scores and case_id in bge_results and case_id in qwen_results:
                # Extract each model's rankings for this case
                bge_rankings = bge_results[case_id]["rankings"]
                qwen_rankings = qwen_results[case_id]["rankings"]
                
                # Extract each model's raw scores
                bge_scores = bge_results[case_id]["scores"]
                qwen_scores = qwen_results[case_id]["scores"]
                
                # In verbose mode, show top-5 indices and their gold scores
                if verbose:
                    print(f"\nBad Case: {case_id} - {questions[case_id]}")
                    print("BGE重排Top-5索引:", bge_rankings[:5])
                    print("Qwen重排Top-5索引:", qwen_rankings[:5])
                    # Rankings are treated as indices into the gold-score list
                    print("BGE重排Top-5得分:", [gold_standard_scores[case_id][idx] for idx in bge_rankings[:5]])
                    print("Qwen重排Top-5得分:", [gold_standard_scores[case_id][idx] for idx in qwen_rankings[:5]])
                
                # Generate a score-distribution comparison chart for this case
                dist_output_path = os.path.join(args.output_dir, f'score_distribution_{case_id}.png')
                plot_score_distribution(
                    bge_scores,
                    qwen_scores,
                    gold_standard_scores[case_id],
                    case_id,
                    output_path=dist_output_path
                )
                print(f"  - 已生成分数分布对比图: {dist_output_path}")
            else:
                print(f"  - 警告: Bad Case {case_id} 在数据中不存在，已跳过")
    
    print_separator("评估完成")
    print(f"所有结果已保存在 {os.path.abspath(args.output_dir)} 目录")
    print("可以查看以下文件:")
    print(f"  - evaluation_results.json: 详细的评估指标")
    for metric in metrics:
        print(f"  - {metric}_comparison.png: {metric}指标对比图")
    if args.bad_cases:
        print(f"  - score_distribution_*.png: Bad Case中分数分布对比图")

# Script entry point: run the CLI evaluation pipeline.
if __name__ == "__main__":
    main()