# -*- coding: utf-8 -*-
"""
按“诊断 vs 其他题型”比例，对已有 Alpaca 数据进行两级采样：
  ① 第一级：对每个题型随机采 10% 作为预采样集；
  ② 第二级：从预采样集内，诊断类全保留，其他类按设定比例采样（0%、5%、20%、50%、100% 等）。

输出：
  - 多个采样版本的 JSON 文件
  - dataset_info.json（含 recommended_epoch 计算）
  - 控制台 summary 打印每个子集大小和推荐 epoch

推荐 epoch 计算方式：
  epoch = ceil( all_set_size / subset_size )
"""

import os
import json
import math
import random
import argparse
from typing import Any, Dict, List, Tuple

# ================= 工具函数 =================

def read_json(path: str):
    """Load and return the JSON content of *path* (UTF-8)."""
    with open(path, "r", encoding="utf-8") as fp:
        return json.load(fp)

def save_json(data: Any, path: str):
    """Write *data* to *path* as pretty-printed UTF-8 JSON.

    Parent directories are created on demand.  Prints a confirmation line
    with the element count for lists (or the literal 'info' otherwise).

    Bug fix: the original unconditionally called
    ``os.makedirs(os.path.dirname(path))``, which raises FileNotFoundError
    for a bare filename (dirname == ""); only create directories when a
    directory component exists.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
    print(f"[OK] 已保存 {path} ({len(data) if isinstance(data, list) else 'info'})")

# ================= 数据加载与解析 =================

_ALLOWED_QTYPES = {"diagnosis", "judge", "compare", "choice"}

def _load_source_datainfo(alpaca_dir: str) -> Dict[str, Any]:
    p = os.path.join(alpaca_dir, "dataset_info.json")
    return read_json(p) if os.path.exists(p) else {}

def _resolve_source_file(alpaca_dir: str, dataname: str, src_info: Dict[str, Any]) -> Tuple[str, Dict[str, str], str]:
    """解析数据集路径和列信息"""
    default_cols = {"prompt": "instruction", "query": "input", "response": "output", "images": "images"}

    if dataname.lower().endswith(".json"):
        abs_path = os.path.join(alpaca_dir, dataname)
        if not os.path.exists(abs_path):
            raise FileNotFoundError(f"[未找到源文件] {abs_path}")
        return abs_path, default_cols, os.path.splitext(os.path.basename(dataname))[0]

    if dataname not in src_info:
        raise KeyError(f"[源 datainfo 缺少条目] {dataname}")
    entry = src_info[dataname]
    file_name = entry.get("file_name")
    cols = entry.get("columns", default_cols)
    abs_path = os.path.join(alpaca_dir, file_name)
    if not os.path.exists(abs_path):
        raise FileNotFoundError(f"[未找到源文件] {abs_path}")
    return abs_path, cols, dataname

# ================= 数据划分与采样 =================

def _split_pools(data: List[Dict[str, Any]]) -> Tuple[List[Dict[str,Any]], List[Dict[str,Any]]]:
    """Partition *data* into (diagnosis items, other allowed-type items).

    Items whose question_type is outside _ALLOWED_QTYPES are discarded.
    Relative order within each pool follows the input order.
    """
    diag: List[Dict[str, Any]] = []
    others: List[Dict[str, Any]] = []
    for entry in data:
        kind = entry.get("question_type")
        if kind not in _ALLOWED_QTYPES:
            continue
        (diag if kind == "diagnosis" else others).append(entry)
    return diag, others

def _sample_others(others: List[Dict[str,Any]], ratio: float, seed: int) -> List[Dict[str,Any]]:
    if ratio <= 0.0:
        return []
    if ratio >= 1.0:
        return list(others)
    rng = random.Random(seed)
    k = int(math.floor(len(others) * ratio))
    return rng.sample(others, k=k)

def _name_for_ratio(base_key: str, ratio: float) -> str:
    if ratio >= 1.0:
        return f"{base_key}_all"
    if ratio <= 0.0:
        return f"{base_key}_diag_only"
    pct = int(round(ratio * 100))
    return f"{base_key}_other_p{pct}"

# ================= 主函数 =================

def build_ratio_sets_with_pre_sample(
    alpaca_dir: str,
    dataname: str,
    output_dir: str,
    ratios: List[float],
    pre_sample_ratio: float = 0.2,
    seed: int = 42
):
    """Two-stage sampling of an Alpaca dataset by question type.

    Stage 1: for every allowed question type, randomly keep
    ``pre_sample_ratio`` of its items.  Stage 2: within that pre-sample,
    keep all "diagnosis" items and subsample the remaining types at each
    ratio in ``ratios``.

    Writes one JSON file per ratio plus a ``dataset_info.json`` (with a
    recommended epoch = ceil(full-subset size / subset size)) into
    ``output_dir``, prints a console summary, and returns a summary dict.

    Raises:
        ValueError: the source file is not a JSON list, or ``ratios`` does
            not include 1.0 (needed as the epoch baseline).
    """
    os.makedirs(output_dir, exist_ok=True)

    src_info = _load_source_datainfo(alpaca_dir)
    src_path, columns, base_key = _resolve_source_file(alpaca_dir, dataname, src_info)
    data = read_json(src_path)
    if not isinstance(data, list):
        raise ValueError(f"[源数据格式错误] {src_path}")

    # ===== Stage 1: per-type pre-sampling at pre_sample_ratio =====
    rng = random.Random(seed)
    buckets: Dict[str, List[Dict[str, Any]]] = {}
    for item in data:
        qt = item.get("question_type")
        if qt not in _ALLOWED_QTYPES:
            continue  # unknown question types are dropped entirely
        buckets.setdefault(qt, []).append(item)

    pre_sampled = []
    for qt, items in buckets.items():
        # floor(): very small buckets may contribute 0 items.
        k = int(len(items) * pre_sample_ratio)
        pre_sampled.extend(rng.sample(items, k=k))
    print(f"[预采样完成] 共 {len(pre_sampled)} 条 ({pre_sample_ratio*100:.0f}%)")

    # ===== Stage 2: keep all diagnosis items, subsample the rest =====
    diag_pool, other_pool = _split_pools(pre_sampled)
    out_info, size_map = {}, {}

    for r in ratios:
        others_pick = _sample_others(other_pool, r, seed)
        subset = diag_pool + others_pick
        rng.shuffle(subset)

        out_key = _name_for_ratio(base_key, r)
        out_file = f"{out_key}.json"
        out_path = os.path.join(output_dir, out_file)
        save_json(subset, out_path)

        out_info[out_key] = {"file_name": out_file, "columns": columns}
        size_map[r] = len(subset)

    # ===== Recommended epoch: ceil(full-set size / subset size) =====
    if 1.0 not in size_map:
        raise ValueError("ratio=1.0 (all) 数据缺失，无法计算推荐 epoch")

    base_n = size_map[1.0]
    for r, n in size_map.items():
        rec_epoch = math.ceil(base_n / n) if n > 0 else None
        out_key = _name_for_ratio(base_key, r)
        out_info[out_key]["recommended_epoch"] = rec_epoch

    save_json(out_info, os.path.join(output_dir, "dataset_info.json"))

    # ===== Console summary =====
    print("\n=== 数据集采样结果 ===")
    print(f"源数据总数: {len(data)}")
    # BUG FIX: this line used to hard-code "(20%)" even though
    # pre_sample_ratio is configurable (the CLI default is 0.1).
    print(f"预采样后: {len(pre_sampled)} ({pre_sample_ratio*100:.0f}%)")
    print("比例\t样本数\t推荐epoch")
    for r in sorted(ratios, reverse=True):
        k = _name_for_ratio(base_key, r)
        print(f"{r:>4.2f}\t{size_map[r]:>6}\t{out_info[k]['recommended_epoch']}")
    print("======================\n")

    return {
        "source": {"path": src_path, "total": len(data)},
        "pre_sample": len(pre_sampled),
        "generated": {k: v["file_name"] for k, v in out_info.items()},
        "epoch_suggestion": {k: v["recommended_epoch"] for k, v in out_info.items()}
    }

# ================= CLI =================

def parse_args(argv=None):
    """Parse CLI options for the two-stage sampler.

    Args:
        argv: optional explicit argument list; ``None`` (the default, and
            the original call signature) falls back to ``sys.argv[1:]``.
            Accepting an explicit list makes the parser testable and
            reusable without touching process-global state.
    """
    p = argparse.ArgumentParser(description="按题型比例进行两级采样（预采样20% + 比例采样）")
    p.add_argument("--alpaca_dir", type=str, default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/Alpaca_data/", help="源 Alpaca 目录")
    p.add_argument("--dataname", type=str, default="med_pub_simpleQA_train", help="数据集名或文件名")
    p.add_argument("--output_dir", type=str, default="/home/zhangpinglu/data0/gy/Dataset/fundusreasoner/alpaca_sub_exp_1", help="输出目录")
    p.add_argument("--ratios", type=str, default="1,0,0.05,0.2,0.5", help="采样比例列表，例如 '1,0,0.05,0.2,0.5'")
    p.add_argument("--pre_sample_ratio", type=float, default=0.1, help="预采样比例（默认10%）")
    p.add_argument("--seed", type=int, default=42, help="随机种子")
    return p.parse_args(argv)

if __name__ == "__main__":
    # CLI entry point: parse options, expand the ratio list, run the
    # two-stage sampler, and echo the returned summary as JSON.
    cli = parse_args()
    ratio_list = [float(tok) for tok in (part.strip() for part in cli.ratios.split(",")) if tok]
    result = build_ratio_sets_with_pre_sample(
        alpaca_dir=cli.alpaca_dir,
        dataname=cli.dataname,
        output_dir=cli.output_dir,
        ratios=ratio_list,
        pre_sample_ratio=cli.pre_sample_ratio,
        seed=cli.seed,
    )
    print(json.dumps(result, ensure_ascii=False, indent=2))
