#!/usr/bin/env python3
"""
从对比实验结果生成 Mini-Dev 兼容的预测文件（JSON）。

输出：
- mini_dev/sql_result/predict_mini_dev_mcts.json
- mini_dev/sql_result/predict_mini_dev_evolutionary.json

格式（每个 key 为 question_id）：
{ "<question_id>": "<db_id>\t----- bird -----\t<sql>" }

如果 `results/comparison_experiment_results.json` 不存在，则尝试使用 `results/dev_pred_sqls.json` 作为示例（干运行）。
"""
import json
from pathlib import Path

# Real experiment output: a list of per-task dicts carrying the MCTS and
# evolutionary SQL predictions.
RESULTS_COMPARISON_PATH = Path("results/comparison_experiment_results.json")
# Fallback predictions (mapping question_id -> SQL) used for a dry run.
DEV_PRED_SQLS_PATH = Path("results/dev_pred_sqls.json")
# Destination directory for the Mini-Dev prediction JSON files.
OUT_DIR = Path("mini_dev/sql_result")

# Ensure the output directory exists (note: module-level side effect at import time).
OUT_DIR.mkdir(parents=True, exist_ok=True)


def load_comparison_results(path=None):
    """Load the comparison experiment results JSON.

    Args:
        path: Optional override for the results file location. Defaults to
            ``RESULTS_COMPARISON_PATH`` (resolved at call time, so the
            no-argument call behaves exactly as before).

    Returns:
        The parsed JSON payload (expected: a list of per-task dicts), or
        ``None`` when the file does not exist.
    """
    path = RESULTS_COMPARISON_PATH if path is None else Path(path)
    if not path.exists():
        return None
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)


def load_dev_preds(path=None):
    """Load the fallback dev predictions JSON.

    Args:
        path: Optional override for the predictions file location. Defaults
            to ``DEV_PRED_SQLS_PATH`` (resolved at call time, so the
            no-argument call behaves exactly as before).

    Returns:
        The parsed JSON payload (expected: a mapping question_id -> SQL), or
        ``None`` when the file does not exist.
    """
    path = DEV_PRED_SQLS_PATH if path is None else Path(path)
    if not path.exists():
        return None
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)


def build_predict_mapping_from_comparison(results):
    """Build per-method prediction mappings from comparison results.

    Args:
        results: Iterable of dicts with keys ``task_id``, ``db_id``,
            ``mcts_sql`` and ``evolutionary_sql`` (missing/empty SQL
            entries are skipped; a missing ``db_id`` becomes ``unknown``).

    Returns:
        A ``(mcts_map, evo_map)`` tuple, each mapping the stringified
        question id to ``"<db_id>\\t----- bird -----\\t<sql>"``.
    """
    mcts_predictions = {}
    evolutionary_predictions = {}
    for record in results:
        question_id = str(record.get("task_id"))
        database = record.get("db_id", "unknown")
        # Dispatch each method's SQL into its own mapping; falsy SQL
        # (missing or empty string) produces no entry.
        for sql_key, target in (
            ("mcts_sql", mcts_predictions),
            ("evolutionary_sql", evolutionary_predictions),
        ):
            sql = record.get(sql_key)
            if sql:
                target[question_id] = f"{database}\t----- bird -----\t{sql}"
    return mcts_predictions, evolutionary_predictions


def build_predict_mapping_from_dev(dev_preds):
    """Build dry-run prediction mappings from dev predictions.

    The same SQL is used for both methods, and no id -> db_id mapping is
    available here, so the db slot is hard-wired to ``unknown``.

    Args:
        dev_preds: Mapping of question id -> SQL string.

    Returns:
        A ``(mcts_map, evo_map)`` tuple of two distinct dicts with
        identical contents: stringified question id ->
        ``"unknown\\t----- bird -----\\t<sql>"``.
    """
    shared = {
        str(question_id): f"unknown\t----- bird -----\t{sql}"
        for question_id, sql in dev_preds.items()
    }
    # Return two independent dict objects so callers can mutate one
    # mapping without affecting the other.
    return dict(shared), dict(shared)


def _fill_missing(target, source):
    # Copy only the keys absent from `target` — preserve any real outputs.
    for key, value in source.items():
        if key not in target:
            target[key] = value


def _write_predictions(path, mapping, label):
    # Serialize one method's predictions as pretty-printed UTF-8 JSON.
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(mapping, f, indent=2, ensure_ascii=False)
    print(f"Wrote {label} predictions to {path}")


def main():
    """Generate Mini-Dev compatible prediction files for both methods.

    Prefers real comparison results; when they are absent (or missing one
    method's output entirely) falls back to results/dev_pred_sqls.json so a
    dry-run evaluation is still possible. Writes one JSON file per method
    into OUT_DIR and returns None.
    """
    print("=> 生成 Mini-Dev 预测文件（干运行/真实运行）")

    comp = load_comparison_results()
    if comp is not None:
        print("Found comparison results in results/comparison_experiment_results.json, using it to create predictions")
        mcts_map, evo_map = build_predict_mapping_from_comparison(comp)
        # If either map is empty, try to fill with dev_pred_sqls.json (dry-run fallback)
        if not mcts_map or not evo_map:
            dev_preds = load_dev_preds()
            if dev_preds is not None:
                print("Filling missing predictions from results/dev_pred_sqls.json for dry-run/pseudo-eval")
                dev_mcts, dev_evo = build_predict_mapping_from_dev(dev_preds)
                _fill_missing(mcts_map, dev_mcts)
                _fill_missing(evo_map, dev_evo)
    else:
        print("Comparison results not found. Using results/dev_pred_sqls.json as a fallback for a dry-run.")
        dev_preds = load_dev_preds()
        if dev_preds is None:
            print("Could not find any prediction files to simulate. Please run the experiment first or provide dev_pred_sqls.json")
            return
        mcts_map, evo_map = build_predict_mapping_from_dev(dev_preds)

    _write_predictions(OUT_DIR / "predict_mini_dev_mcts.json", mcts_map, "MCTS")
    _write_predictions(OUT_DIR / "predict_mini_dev_evolutionary.json", evo_map, "Evolutionary")

    print("=> 完成：请检查输出文件，然后运行 mini_dev/evaluation/run_evaluation.sh 以评估（如果你已经下载并准备了评估脚本）。")


# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
