# eval_script.py
from evalscope import TaskConfig, run_task
import json
import time


def setup_evaluation(model='./qwen3-distilled-model',
                     api_url='http://127.0.0.1:8000/v1/chat/completions'):
    """Build the EvalScope task configuration for evaluating a served model.

    Args:
        model: Path or ID of the model to evaluate. Defaults to the
            locally fine-tuned checkpoint.
        api_url: OpenAI-compatible chat-completions endpoint of the
            serving backend. Defaults to a local vLLM instance.

    Returns:
        TaskConfig: Configuration object consumed by ``run_task``.
    """
    task_cfg = TaskConfig(
        model=model,
        api_url=api_url,
        eval_type='service',
        datasets=['data_collection'],
        dataset_args={
            'data_collection': {
                'dataset_id': 'modelscope/EvalScope-Qwen3',
                # Strip everything up to </think> so only the final answer
                # (not the chain-of-thought) is scored.
                'filters': {'remove_until': '</think>'}
            }
        },
        # Additional evaluation parameters.
        few_shot=5,
        max_tokens=1024,
        # Low temperature keeps generations near-deterministic for scoring.
        temperature=0.1,
        metrics=['accuracy', 'bleu', 'rouge'],  # evaluation metrics
    )

    return task_cfg


def _wait_for_service(url='http://127.0.0.1:8000/health',
                      timeout=30.0, interval=2.0):
    """Poll *url* until the server responds or *timeout* seconds elapse.

    Returns True once the server answers (any HTTP status counts as "up"),
    False if the deadline passes without a successful connection.
    """
    # Local import: only needed for this readiness probe.
    import urllib.error
    import urllib.request

    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            urllib.request.urlopen(url, timeout=interval)
            return True
        except urllib.error.HTTPError:
            # Got an HTTP response (even an error status): server is up.
            return True
        except (urllib.error.URLError, OSError):
            time.sleep(interval)
    return False


def run_comprehensive_evaluation():
    """Run the full evaluation of the fine-tuned model and persist results.

    Returns:
        The results object produced by ``run_task`` on success,
        or None if the evaluation raised an exception.
    """
    # Wait for the vLLM service by polling its health endpoint instead of
    # sleeping a fixed 10 s; proceed anyway on timeout so a missing
    # /health route does not block the run.
    print("等待vLLM服务启动...")
    _wait_for_service()

    # Build the evaluation task configuration.
    task_cfg = setup_evaluation()

    print("开始评估...")

    # Keep the try minimal: only the evaluation itself is best-effort.
    try:
        results = run_task(task_cfg)
    except Exception as e:
        print(f"评估过程中出现错误: {e}")
        return None

    # Persist full results for later inspection.
    with open('evaluation_results.json', 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)

    print("评估完成！结果已保存到 evaluation_results.json")

    # Echo the headline metrics, if present.
    if 'overall' in results:
        print("\n=== 评估结果汇总 ===")
        for metric, score in results['overall'].items():
            print(f"{metric}: {score:.4f}")

    return results


def compare_with_baseline():
    """Evaluate the original (non-fine-tuned) model as a baseline.

    Returns:
        The results object produced by ``run_task`` for the baseline model.
    """
    # Same dataset setup as the fine-tuned run, so scores are comparable.
    collection_args = {
        'data_collection': {
            'dataset_id': 'modelscope/EvalScope-Qwen3',
            'filters': {'remove_until': '</think>'}
        }
    }

    # Original model serves as the baseline.
    config = TaskConfig(
        model='Qwen/Qwen3-8B',
        api_url='http://127.0.0.1:8000/v1/chat/completions',
        eval_type='service',
        datasets=['data_collection'],
        dataset_args=collection_args,
        few_shot=5,
        max_tokens=1024,
        temperature=0.1,
    )

    print("运行基线模型评估...")
    return run_task(config)


if __name__ == "__main__":
    # Evaluate the fine-tuned model end to end.
    run_comprehensive_evaluation()

    # Optional: also evaluate the original model for comparison.
    # baseline_results = compare_with_baseline()