#!/usr/bin/env python3
"""
对比实验脚本：比较MCTS和进化算法在Alpha-SQL上的性能

运行10个任务，对比：
1. 执行时间
2. 生成的SQL质量（简化评估）
"""

import os
import sys
import time
import json
import pickle
import argparse
import subprocess
from pathlib import Path
from typing import Dict, List, Tuple
from tqdm import tqdm

# 确保可以导入alphasql模块
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

def load_tasks(tasks_file_path: str) -> List:
    """加载任务列表"""
    with open(tasks_file_path, "rb") as f:
        tasks = pickle.load(f)
    return tasks

def run_single_task(method: str, config_path: str, task_id: int, rollouts: int = 4) -> Tuple[float, str]:
    """
    运行单个任务，返回执行时间和结果文件路径
    """
    tqdm.write(f"  → 开始运行 {method.upper()} 方法...")
    start_time = time.time()

    if method == "mcts":
        # 调用run_one_task.py
        cmd = [
            sys.executable, "tools/run_one_task.py", config_path,
            "--rollouts", str(rollouts), "--task-id", str(task_id)
        ]
        expected_result_file = f"results/Qwen2.5-Coder-32B-Instruct/bird/dev/{task_id}.pkl"
    elif method == "evolutionary":
        # 调用run_one_task.py，但使用evolutionary配置
        cmd = [
            sys.executable, "tools/run_one_task.py", config_path,
            "--task-id", str(task_id)
        ]
        expected_result_file = f"results/Qwen2.5-Coder-32B-Instruct/bird/dev_evolutionary/{task_id}.pkl"
    else:
        raise ValueError(f"Unknown method: {method}")

    # 静默运行,避免干扰进度条
    try:
        # 设置环境变量
        env = os.environ.copy()
        env['OPENAI_API_KEY'] = 'deepseek-chat'
        env['PYTHONPATH'] = 'd:\\Alpha-SQL'

        result = subprocess.run(cmd, capture_output=True, text=True, cwd="d:\\Alpha-SQL", env=env)
        end_time = time.time()
        execution_time = end_time - start_time

        if result.returncode != 0:
            tqdm.write(f"  ❌ {method.upper()} 任务 {task_id} 执行失败")
            tqdm.write(f"  → 错误输出: {result.stderr[:200]}...")  # 只显示前200字符
            return execution_time, None

        result_path = Path("d:\\Alpha-SQL") / expected_result_file
        if result_path.exists():
            file_size = result_path.stat().st_size
            tqdm.write(f"  ✓ {method.upper()} 完成 ({execution_time:.1f}秒, {file_size} bytes)")
            return execution_time, str(result_path)
        else:
            tqdm.write(f"  ❌ 结果文件不存在: {result_path}")
            return execution_time, None

    except Exception as e:
        end_time = time.time()
        execution_time = end_time - start_time
        tqdm.write(f"  ❌ {method.upper()} 任务 {task_id} 发生异常: {e}")
        return execution_time, None

def collect_sql_results(result_files: Dict[str, str]) -> Dict[str, str]:
    """介结果文件中提取SQL"""
    tqdm.write(f"  → 提取SQL结果...")
    sql_results = {}

    for method, result_file in result_files.items():
        tqdm.write(f"    → 处理 {method.upper()} 结果...")
        if result_file and Path(result_file).exists():
            try:
                with open(result_file, "rb") as f:
                    result_data = pickle.load(f)

                tqdm.write(f"    → 结果数据类型: {type(result_data)}")

                # 提取最好的SQL (静默处理)
                sql = None
                if isinstance(result_data, list) and len(result_data) > 0:
                    # MCTS format: list of paths, each path is a list of nodes
                    if len(result_data) > 0 and len(result_data[-1]) > 0:
                        # Get the last node from the last path
                        end_node = result_data[-1][-1]
                        if hasattr(end_node, 'final_sql_query') and end_node.final_sql_query:
                            sql = end_node.final_sql_query
                        elif hasattr(end_node, 'sql_query') and end_node.sql_query:
                            sql = end_node.sql_query
                elif hasattr(result_data, 'final_sql_query') and result_data.final_sql_query:
                    # Evolutionary format: single node
                    sql = result_data.final_sql_query
                elif hasattr(result_data, 'sql_query') and result_data.sql_query:
                    sql = result_data.sql_query
                elif isinstance(result_data, list) and len(result_data) == 1 and hasattr(result_data[0], 'final_sql_query'):
                    # Evolutionary format: single-item list with node
                    sql = result_data[0].final_sql_query
                elif isinstance(result_data, list) and len(result_data) == 1 and hasattr(result_data[0], 'sql_query'):
                    # Evolutionary format: single-item list with node
                    sql = result_data[0].sql_query

                if sql:
                    sql_results[method] = sql
                else:
                    tqdm.write(f"    ⚠ 无法从 {method.upper()} 结果文件中提取SQL")
                    sql_results[method] = None

            except Exception as e:
                print(f"    ❌ 读取 {method.upper()} 结果文件时出错: {e}")
                sql_results[method] = None
        else:
            print(f"    ❌ {method.upper()} 结果文件不存在")
            sql_results[method] = None

    return sql_results

def simple_sql_quality_check(sql: str) -> Dict[str, int]:
    """简单的SQL质量检查"""
    if not sql:
        return {
            "length": 0,
            "has_select": 0,
            "has_where": 0,
            "has_join": 0,
            "has_group_by": 0,
            "has_order_by": 0,
            "has_limit": 0,
            "has_distinct": 0,
            "complexity_score": 0
        }

    sql_upper = sql.upper()

    # 基础检查
    checks = {
        "length": len(sql),
        "has_select": 1 if "SELECT" in sql_upper else 0,
        "has_where": 1 if "WHERE" in sql_upper else 0,
        "has_join": 1 if "JOIN" in sql_upper else 0,
        "has_group_by": 1 if "GROUP BY" in sql_upper else 0,
        "has_order_by": 1 if "ORDER BY" in sql_upper else 0,
        "has_limit": 1 if "LIMIT" in sql_upper else 0,
        "has_distinct": 1 if "DISTINCT" in sql_upper else 0,
    }

    # 计算复杂度分数（基于SQL复杂性）
    complexity_score = sum(checks.values()) - checks["length"] / 100  # 长度惩罚
    checks["complexity_score"] = max(0, int(complexity_score))

    return checks

def main():
    parser = argparse.ArgumentParser(description="Run comparison experiment between MCTS and Evolutionary algorithms")
    parser.add_argument("--mcts-config", default="config/qwen32b_bird_dev.yaml", help="MCTS config file")
    parser.add_argument("--evolutionary-config", default="config/qwen32b_bird_dev_evolutionary.yaml", help="Evolutionary config file")
    parser.add_argument("--tasks", nargs='+', type=int, default=list(range(10)), help="Task IDs to run (default: 0-9)")
    parser.add_argument("--rollouts", type=int, default=4, help="Number of rollouts for MCTS")
    parser.add_argument("--output-prefix", default="comparison_experiment", help="Output file prefix")

    args = parser.parse_args()

    # 加载任务
    tasks_file = "data/preprocessed/bird/dev/dev/tasks.pkl"
    if not Path(tasks_file).exists():
        print(f"Tasks file not found: {tasks_file}")
        print("Please ensure the data is properly prepared.")
        sys.exit(1)

    tasks = load_tasks(tasks_file)

    print(f"Running comparison experiment with {len(args.tasks)} tasks")
    print(f"MCTS config: {args.mcts_config}")
    print(f"Evolutionary config: {args.evolutionary_config}")
    print(f"Tasks: {args.tasks}")
    print("=" * 80)

    results = []
    total_tasks = len(args.tasks)

    print(f"\n🚀 开始对比实验: 共 {total_tasks} 个任务")
    print("=" * 100)
    
    for i, task_id in enumerate(tqdm(args.tasks, desc="总体进度", unit="任务", position=0, ncols=100), 1):
        tqdm.write(f"\n{'='*100}")
        tqdm.write(f"📋 任务 {i}/{total_tasks}: ID={task_id}")
        tqdm.write(f"{'='*100}")

        # 找到对应的任务
        task = next((t for t in tasks if t.question_id == task_id), None)
        if not task:
            tqdm.write(f"❌ 任务 {task_id} 未找到，跳过")
            continue

        db_id = getattr(task, 'db_id', 'unknown')
        question = getattr(task, 'question', f'Task {task_id}')

        tqdm.write(f"📌 数据库: {db_id}")
        tqdm.write(f"📌 问题: {question[:100]}..." if len(question) > 100 else f"📌 问题: {question}")
        tqdm.write("-" * 100)

        # 运行MCTS
        tqdm.write(f"\n🔍 开始 MCTS (rollouts={args.rollouts})")
        mcts_time, mcts_result_file = run_single_task("mcts", args.mcts_config, task_id, args.rollouts)

        # 运行进化算法
        tqdm.write(f"\n🧬 开始进化算法")
        evo_time, evo_result_file = run_single_task("evolutionary", args.evolutionary_config, task_id)

        # 收集结果
        tqdm.write(f"\n📊 收集和分析结果")
        result_files = {
            "mcts": mcts_result_file,
            "evolutionary": evo_result_file
        }

        sql_results = collect_sql_results(result_files)

        # 简单的SQL质量评估
        tqdm.write(f"  → 评估SQL质量")
        mcts_quality = simple_sql_quality_check(sql_results.get("mcts"))
        evo_quality = simple_sql_quality_check(sql_results.get("evolutionary"))

        # 记录结果
        task_result = {
            "task_id": task_id,
            "db_id": db_id,
            "question": question[:100] + "..." if len(question) > 100 else question,
            "mcts_time": mcts_time,
            "evolutionary_time": evo_time,
            "mcts_sql": sql_results.get("mcts"),
            "evolutionary_sql": sql_results.get("evolutionary"),
            "mcts_quality": mcts_quality,
            "evolutionary_quality": evo_quality
        }

        results.append(task_result)

        # 显示任务结果摘要
        tqdm.write(f"\n📈 任务 {task_id} 结果摘要:")
        tqdm.write(f"  MCTS执行时间: {mcts_time:.2f}秒" if mcts_time else "  MCTS执行时间: 失败")
        tqdm.write(f"  进化算法执行时间: {evo_time:.2f}秒" if evo_time else "  进化算法执行时间: 失败")
        tqdm.write(f"  MCTS SQL长度: {mcts_quality['length']}")
        tqdm.write(f"  进化算法SQL长度: {evo_quality['length']}")
        tqdm.write(f"  MCTS包含SELECT: {'是' if mcts_quality['has_select'] else '否'}")
        tqdm.write(f"  进化算法包含SELECT: {'是' if evo_quality['has_select'] else '否'}")
        tqdm.write("-" * 100)

    # 保存汇总结果
    print(f"\n💾 保存实验结果...")
    output_file = f"results/{args.output_prefix}_results.json"
    os.makedirs("results", exist_ok=True)
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)

    print(f"✅ 结果已保存到: {output_file}")

    # 导出 Mini-Dev 预测文件（供 mini_dev/evaluation 使用）
    try:
        from tools.prepare_minidev_predictions import build_predict_mapping_from_comparison
        print("\n🔁 导出 Mini-Dev 预测文件...")
        # 生成 mapping 并写入 mini_dev/sql_result
        mcts_map, evo_map = build_predict_mapping_from_comparison(results)
        Path("mini_dev/sql_result").mkdir(parents=True, exist_ok=True)
        with open("mini_dev/sql_result/predict_mini_dev_mcts.json", "w", encoding="utf-8") as f:
            json.dump(mcts_map, f, indent=2, ensure_ascii=False)
        with open("mini_dev/sql_result/predict_mini_dev_evolutionary.json", "w", encoding="utf-8") as f:
            json.dump(evo_map, f, indent=2, ensure_ascii=False)
        print("✅ Mini-Dev 预测文件导出完成: mini_dev/sql_result/*.json")
    except Exception as e:
        print(f"⚠️ Mini-Dev 预测文件导出失败: {e}")

    # 计算汇总统计
    print(f"\n📊 计算汇总统计...")
    mcts_times = [r["mcts_time"] for r in results if r["mcts_time"] is not None]
    evo_times = [r["evolutionary_time"] for r in results if r["evolutionary_time"] is not None]
    mcts_quality_scores = [sum(r["mcts_quality"].values()) for r in results if r["mcts_quality"]]
    evo_quality_scores = [sum(r["evolutionary_quality"].values()) for r in results if r["evolutionary_quality"]]

    print(f"\n{'='*80}")
    print(f"🎯 实验汇总报告")
    print(f"{'='*80}")
    print(f"总任务数: {total_tasks}")
    print(f"成功完成任务数: {len(results)}")
    print(f"失败任务数: {total_tasks - len(results)}")

    if mcts_times:
        mcts_avg_time = sum(mcts_times) / len(mcts_times)
        mcts_min_time = min(mcts_times)
        mcts_max_time = max(mcts_times)
        print(f"\n⏱️  MCTS 执行时间统计:")
        print(f"  平均: {mcts_avg_time:.2f}秒")
        print(f"  最快: {mcts_min_time:.2f}秒")
        print(f"  最慢: {mcts_max_time:.2f}秒")
        print(f"  样本数: {len(mcts_times)}")

    if evo_times:
        evo_avg_time = sum(evo_times) / len(evo_times)
        evo_min_time = min(evo_times)
        evo_max_time = max(evo_times)
        print(f"\n⏱️  进化算法执行时间统计:")
        print(f"  平均: {evo_avg_time:.2f}秒")
        print(f"  最快: {evo_min_time:.2f}秒")
        print(f"  最慢: {evo_max_time:.2f}秒")
        print(f"  样本数: {len(evo_times)}")

    if mcts_quality_scores:
        mcts_avg_quality = sum(mcts_quality_scores) / len(mcts_quality_scores)
        print(f"\n📈 MCTS SQL质量统计:")
        print(f"  平均质量分数: {mcts_avg_quality:.2f}")
        print(f"  样本数: {len(mcts_quality_scores)}")

    if evo_quality_scores:
        evo_avg_quality = sum(evo_quality_scores) / len(evo_quality_scores)
        print(f"\n📈 进化算法SQL质量统计:")
        print(f"  平均质量分数: {evo_avg_quality:.2f}")
        print(f"  样本数: {len(evo_quality_scores)}")

    # 时间对比分析
    if mcts_times and evo_times:
        mcts_avg = sum(mcts_times) / len(mcts_times)
        evo_avg = sum(evo_times) / len(evo_times)
        if evo_avg > 0:
            speedup = mcts_avg / evo_avg
            if speedup > 1:
                print(f"\n⚡ 性能对比: 进化算法比MCTS快 {speedup:.2f} 倍")
            else:
                print(f"\n⚡ 性能对比: MCTS比进化算法快 {1/speedup:.2f} 倍")

    print(f"\n✅ 对比实验完成！")
    print(f"详细结果请查看: {output_file}")

    # 打印 LLM token/cost 使用统计（如果可用）
    try:
        from alphasql.llm_call.openai_llm import DEFAULT_COST_RECORDER
        print('\n💰 LLM 消耗统计:')
        DEFAULT_COST_RECORDER.print_profile()
    except Exception as e:
        print(f"⚠️ 无法打印LLM成本统计: {e}")

if __name__ == "__main__":
    main()