#!/usr/bin/env python3
"""
Orchestrate the experiment (MCTS vs Evolutionary) and evaluation on Mini-Dev.

Usage:
  python tools/run_experiment_pipeline.py --tasks 0 1 2 ... [--run-llm]

If --run-llm is not provided, the script performs a dry run only (pre-check, generates the prediction JSON from the existing dev_pred_sqls.json, and checks evaluation readiness). This avoids consuming API credits.

If --run-llm is provided, the script calls the existing `tools/run_comparison_experiment.py` with the requested tasks (this makes LLM API calls).
"""
import argparse
import subprocess
import sys
from pathlib import Path

ROOT = Path('.').resolve()


def precheck():
    """Run the pre-flight check script and report whether it succeeded.

    Returns:
        bool: True when tools/precheck_experiment.py exits with status 0.
    """
    print('=> Running precheck...')
    completed = subprocess.run([sys.executable, 'tools/precheck_experiment.py'])
    return completed.returncode == 0


def run_experiment(tasks, rollouts=4, mcts_config='config/qwen32b_bird_dev.yaml', evo_config='config/qwen32b_bird_dev_evolutionary.yaml'):
    """Launch the MCTS-vs-Evolutionary comparison run (makes real LLM API calls).

    Args:
        tasks: iterable of task IDs forwarded to the comparison script.
        rollouts: number of MCTS rollouts per task.
        mcts_config: YAML config path for the MCTS solver.
        evo_config: YAML config path for the evolutionary solver.

    Returns:
        bool: True when the comparison script exits with status 0.
    """
    print('=> Running comparison experiment (note: will call LLM APIs)')
    cmd = [
        sys.executable, 'tools/run_comparison_experiment.py',
        '--mcts-config', mcts_config,
        '--evolutionary-config', evo_config,
        '--tasks', *(str(t) for t in tasks),
        '--rollouts', str(rollouts),
    ]
    return subprocess.run(cmd).returncode == 0


def estimate_api_cost(model_name: str, n_tasks: int, mcts_calls_per_task: int = 200, evo_calls_per_task: int = 50, tokens_per_call: int = 1000, config_path: str = 'config/qwen32b_bird_dev.yaml'):
    """Estimate API cost for running n_tasks for MCTS vs Evolutionary.

    These numbers are heuristic; change mcts_calls_per_task,
    evo_calls_per_task and tokens_per_call to refine.

    Args:
        model_name: model key looked up in MODEL_PRICE_PER_1M_TOKENS.
        n_tasks: number of tasks the estimate covers.
        mcts_calls_per_task: estimated LLM calls per task for MCTS.
        evo_calls_per_task: estimated LLM calls per task for the evolutionary solver.
        tokens_per_call: estimated total tokens per LLM call.
        config_path: YAML config used to read the per-request sample
            multiplier ``mcts_model_kwargs.n`` (previously hard-coded).

    Returns:
        float | None: estimated USD cost, or None when the model is not
        in the price table.
    """
    from alphasql.llm_call.cost_recoder import MODEL_PRICE_PER_1M_TOKENS

    if model_name not in MODEL_PRICE_PER_1M_TOKENS:
        print(f"Model {model_name} not in price table. Cannot estimate cost precisely.")
        return None

    price = MODEL_PRICE_PER_1M_TOKENS[model_name]
    # Take into account the LLM request count multiplier (e.g. n=3 for
    # batched/sampled calls). Best-effort: fall back to 1 if the config
    # cannot be read or parsed.
    import yaml
    try:
        # Context manager ensures the config file is closed (the previous
        # revision leaked the handle via a bare open()).
        with open(config_path) as fh:
            cfg = yaml.safe_load(fh)
        model_kw = cfg.get('mcts_model_kwargs', {})
    except Exception:
        model_kw = {}
    n_multiplier = int(model_kw.get('n', 1)) if model_kw else 1
    # Multiply calls by the n (parallel/combinational) factor.
    mcts_calls_per_task = int(mcts_calls_per_task) * n_multiplier
    evo_calls_per_task = int(evo_calls_per_task) * n_multiplier
    mcts_tokens = mcts_calls_per_task * tokens_per_call * n_tasks
    evo_tokens = evo_calls_per_task * tokens_per_call * n_tasks
    total_tokens = mcts_tokens + evo_tokens
    prompt_fraction = 0.5  # we assume half of tokens are prompt, half are completion
    prompt_tokens = int(total_tokens * prompt_fraction)
    completion_tokens = total_tokens - prompt_tokens

    total_cost = (prompt_tokens / 1e6) * price['prompt'] + (completion_tokens / 1e6) * price['completion']

    print("\n💸 API成本估计 (粗略，基于估算调用次数 & tokens):")
    print(f"  model: {model_name}")
    print(f"  tasks: {n_tasks}")
    print(f"  MCTS calls per task (estimate): {mcts_calls_per_task}")
    print(f"  Evolutionary calls per task (estimate): {evo_calls_per_task}")
    print(f"  tokens per call (estimate): {tokens_per_call}")
    print(f"  total_tokens: {total_tokens:,}")
    print(f"  prompt_tokens: {prompt_tokens:,}, completion_tokens: {completion_tokens:,}")
    print(f"  estimated cost (USD): ${total_cost:.4f}")
    print("  (Change arguments to estimate with different assumptions.)")
    return total_cost


def postprocess_predictions():
    """Generate the Mini-Dev prediction JSON via the helper script.

    Returns:
        bool: True when tools/prepare_minidev_predictions.py exits with status 0.
    """
    print('=> Generating mini-dev predictions')
    completed = subprocess.run([sys.executable, 'tools/prepare_minidev_predictions.py'])
    return completed.returncode == 0


def check_eval():
    """Verify the Mini-Dev evaluation prerequisites via the checker script.

    Returns:
        bool: True when tools/check_minidev_evaluation.py exits with status 0.
    """
    print('=> Check mini-dev evaluation readiness')
    completed = subprocess.run([sys.executable, 'tools/check_minidev_evaluation.py'])
    return completed.returncode == 0


def main():
    """Drive the pipeline: precheck, (optional) LLM run, predictions, eval check."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--tasks', nargs='+', type=int, default=list(range(10)), help='Task IDs to run')
    parser.add_argument('--run-llm', action='store_true', help='Actually run LLM-powered solvers (will consume API credits)')
    parser.add_argument('--rollouts', type=int, default=4, help='Rollouts for MCTS')
    args = parser.parse_args()

    # Gate everything on the environment precheck.
    if not precheck():
        print('Precheck failed. Fix issues before running.')
        sys.exit(1)

    if args.run_llm:
        # Real run: calls LLM APIs and consumes credits.
        if not run_experiment(args.tasks, rollouts=args.rollouts):
            print('Experiment run failed. Check logs.')
            sys.exit(1)
    else:
        print('\nDry run mode — not calling LLM. Create predictions from existing results/dev_pred_sqls.json instead.\n')

    if not postprocess_predictions():
        print('Failed to generate predictions. Abort.')
        sys.exit(1)

    if not check_eval():
        print('Mini-Dev evaluation is not ready. Please ensure you downloaded the Mini-Dev sqlite DBs and ground truth.\n')
        print('See mini_dev/README.md and run the evaluation with:')
        print('  cd mini_dev/evaluation')
        print('  ./run_evaluation.sh')
        sys.exit(1)

    # All prerequisites satisfied — print follow-up instructions.
    print('\nREADY: You can run the mini-dev evaluation script now:')
    print('  cd mini_dev/evaluation')
    print('  ./run_evaluation.sh')

    print('\nOr evaluate EX directly with the example:')
    print('  python mini_dev/evaluation/evaluation_ex.py --predicted_sql_path ../../mini_dev/sql_result/predict_mini_dev_mcts.json --ground_truth_path ../../mini_dev/sqlite/mini_dev_sqlite_gold.sql --db_root_path ../../mini_dev/sqlite/dev_databases/ --num_cpus 4')

    print('\nIf you want to proceed and use LLM (real runs), re-run this script with --run-llm.')


if __name__ == '__main__':
    main()
