# -*- coding: utf-8 -*-
"""
文件: tools/generate_medqa_with_agents.py

功能:
  - 读取疾病清单(all_cn_name.json)与关系白名单(diease_relation.json)
  - 通过双角色 Agent (Generator/Evaluator) 批量生成三类眼底医学问答:
      1) 单病问答 (disease-only)
      2) 两病关系/易混问答 (relation-pair)
      3) 推演问答 (reasoning)
  - 仅实例化一次模型，支持多轮生成 + 评估筛选
  - 输出 JSONL (默认) 或 JSON，自动去重
  - “内容优先、格式宽容”：非空即保留；解析失败则保存 raw_text 供后续人工修复

用法示例:
  python tools/generate_medqa_with_agents.py \
      --all_cn_path ../fundus-reasoner-dataprocess/configs/all_cn_name.json \
      --relation_path ../fundus-reasoner-dataprocess/configs/diease_relation.json \
      --gen_model_id pretrained/medgemma4b \
      --eval_model_id pretrained/medgemma4b \
      --gen_retry 3
"""

import os
import re
import json
import argparse
import random
import hashlib
from typing import List, Dict, Any, Tuple, Optional, Set

from tqdm import tqdm

# Project-local import; adjust the path to your repository layout.
from utils.medgemma_agent import (
    MedGemmaAgent,
    SYSTEM_PROMPT,
    GENERATOR_HEADER,
    GEN_TASK_SINGLE_DISEASE,
    GEN_TASK_RELATION_PAIR,
    GEN_TASK_REASONING,
)

# -------------------- 基础工具 --------------------

def read_json(path: str):
    """Load the JSON document at *path*; raise FileNotFoundError if it is absent."""
    if not os.path.exists(path):
        raise FileNotFoundError(f"JSON 文件不存在: {path}")
    with open(path, "r", encoding="utf-8") as fh:
        return json.load(fh)

def save_json(obj: Any, path: str):
    """Write *obj* as pretty-printed UTF-8 JSON to *path*, creating parent dirs.

    Fix: ``os.path.dirname`` returns "" for a bare filename, and
    ``os.makedirs("", exist_ok=True)`` raises FileNotFoundError — only
    create the directory when there actually is one.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        json.dump(obj, f, ensure_ascii=False, indent=2)

def save_jsonl(items: List[Dict[str, Any]], path: str):
    """Write *items* as one-JSON-object-per-line (JSONL) to *path*.

    Fix: guard against an empty dirname — ``os.makedirs("")`` raises for
    bare filenames; parent directories are created only when present.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        for it in items:
            f.write(json.dumps(it, ensure_ascii=False) + "\n")

def append_log(path: str, line: str):
    """Append *line* (trailing whitespace stripped, newline added) to the log at *path*.

    Fix: only call makedirs when the path has a directory component;
    ``os.makedirs("")`` raises FileNotFoundError for bare filenames.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "a", encoding="utf-8") as f:
        f.write(line.rstrip() + "\n")

def hash_qa(q: str, a: str) -> str:
    """MD5 fingerprint of a QA pair: stripped question and answer joined by '||'."""
    payload = q.strip().encode("utf-8") + b"||" + a.strip().encode("utf-8")
    return hashlib.md5(payload).hexdigest()

def hash_raw(s: str) -> str:
    """MD5 fingerprint of a whitespace-stripped text snippet."""
    return hashlib.md5(s.strip().encode("utf-8")).hexdigest()

# -------------------- 文本→JSON 解析（鲁棒版） --------------------

def _strip_code_fence(s: str) -> str:
    """去掉 ``` 或 ```json 代码围栏"""
    s = s.strip()
    # 去掉成对围栏
    if s.startswith("```"):
        s = re.sub(r"^```[a-zA-Z0-9]*\s*", "", s)
        s = re.sub(r"\s*```$", "", s)
    return s.strip()

def _robust_extract_json_array(text: str) -> Optional[str]:
    """
    Best-effort extraction of a JSON array string from raw model output.

    Handles:
    - ```json ... ``` and plain ``` ... ``` code fences
    - an opening fence with no closing ```
    - surrounding log/explanation text before or after the array
    - trailing commas and truncated (half-written) objects

    Returns the repaired JSON array string, or None when it still fails
    to parse with json.loads.
    """
    if not isinstance(text, str) or not text.strip():
        return None
    s = text

    # 1) Prefer the ```json fenced block if present.
    fence_start = s.find("```json")
    if fence_start != -1:
        start = fence_start + len("```json")
        end = s.find("```", start + 1)
        # No closing fence -> take everything to the end of the string.
        json_block = s[start:] if end == -1 else s[start:end]
    else:
        # 2) Otherwise take the first generic ``` fenced block.
        fence_start = s.find("```")
        if fence_start != -1:
            start = fence_start + len("```")
            end = s.find("```", start + 1)
            json_block = s[start:] if end == -1 else s[start:end]
        else:
            # 3) No fence at all: fall back to the widest [...] span.
            m = re.search(r"\[[\s\S]*\]", s)
            if not m:
                return None
            json_block = m.group(0)

    js = json_block.strip()

    # Drop any junk before the first '[' (e.g. "Here is the JSON: [").
    js = re.sub(r"^[^\[]*\[", "[", js, flags=re.S)

    # Remove a trailing comma right before the closing bracket.
    js = re.sub(r",\s*]", "]", js)

    # Balance braces left open by a truncated object, then close the array.
    if js.count("{") > js.count("}"):
        js += "}" * (js.count("{") - js.count("}"))
    if not js.endswith("]"):
        js += "]"

    # First parse attempt on the repaired string.
    try:
        json.loads(js)
        return js
    except Exception:
        # Last resort: drop the final half-written object and retry once.
        js2 = re.sub(r",\s*{\s*[^}\]]*$", "]", js)
        try:
            json.loads(js2)
            return js2
        except Exception:
            return None

def _regex_salvage_qa_pairs(text: str) -> List[Dict[str, Any]]:
    """
    兜底：从一大段文本里用正则尽量抽取多组 question/answer
    适配 JSON-like/中文“问题/答案”行文。
    """
    if not isinstance(text, str) or not text.strip():
        return []
    s = _strip_code_fence(text)

    pairs: List[Dict[str, Any]] = []

    # 1) JSON-like："question": "...",  ... "answer": "..."
    for m in re.finditer(r'"question"\s*:\s*"([^"]+)"[\s\S]{0,300}?"answer"\s*:\s*"([^"]+)"', s, flags=re.I):
        q = m.group(1).strip()
        a = m.group(2).strip()
        if q and a:
            pairs.append({"question": q, "answer": a})

    if pairs:
        return pairs

    # 2) 中文行文：以“问题/回答/答案/问/答”开头
    blocks = re.split(r"\n\s*\n+", s)
    for b in blocks:
        q = re.search(r"(?:^|\n)\s*(?:Q|问题|问)[:：]\s*(.+)", b)
        a = re.search(r"(?:^|\n)\s*(?:A|回答|答案|答)[:：]\s*(.+)", b)
        if q and a:
            pairs.append({"question": q.group(1).strip(), "answer": a.group(1).strip()})

    return pairs

def _best_effort_to_items(obj_or_text: Any) -> Tuple[List[Dict[str, Any]], Optional[str]]:
    """
    尽力把任意返回解析成 list[ {question, answer, ...} ]；
    若失败但原文非空，返回 ([], raw_text) 供后续保存。
    """
    # 已是 list/dict/json 字符串 的情况
    if isinstance(obj_or_text, list):
        items = obj_or_text
    elif isinstance(obj_or_text, dict):
        items = [obj_or_text]
    elif isinstance(obj_or_text, str):
        # 先尝试直接 JSON 解析（去掉围栏）
        cleaned = _strip_code_fence(obj_or_text)
        if cleaned.startswith("[") or cleaned.startswith("{"):
            try:
                parsed = json.loads(cleaned)
                items = parsed if isinstance(parsed, list) else [parsed]
            except Exception:
                # 尝试鲁棒切片
                arr = _robust_extract_json_array(obj_or_text)
                if arr:
                    parsed = json.loads(arr)
                    items = parsed if isinstance(parsed, list) else [parsed]
                else:
                    # 正则兜底抽取多组问答
                    pairs = _regex_salvage_qa_pairs(obj_or_text)
                    if pairs:
                        items = pairs
                    else:
                        # 实在不行：返回 raw_text
                        return [], obj_or_text
        else:
            # 非 JSON 起始，走鲁棒切片/正则兜底
            arr = _robust_extract_json_array(obj_or_text)
            if arr:
                parsed = json.loads(arr)
                items = parsed if isinstance(parsed, list) else [parsed]
            else:
                pairs = _regex_salvage_qa_pairs(obj_or_text)
                if pairs:
                    items = pairs
                else:
                    return [], obj_or_text
    else:
        # 其它类型直接失败
        return [], str(obj_or_text)

    # 归一化 key + 过滤空
    norm: List[Dict[str, Any]] = []
    for it in items:
        if not isinstance(it, dict):
            continue
        # 容忍 observation/question/answer 结构
        q = it.get("question")
        a = it.get("answer")
        if not q and "observation" in it and "question" in it and "answer" in it:
            q = it.get("question")
            a = it.get("answer")
        if isinstance(q, str) and isinstance(a, str) and q.strip() and a.strip():
            norm.append({"question": q.strip(), "answer": a.strip(), **{k:v for k,v in it.items() if k not in ("question","answer")}})
    return norm, None

# -------------------- 关系对构建 --------------------

def build_relation_pairs(rel_map: Dict[str, Any]) -> List[Tuple[str, str]]:
    """
    Flatten diease_relation.json into a de-duplicated, direction-free pair list.

    Expected input shape:
      { "青光眼": {"white_list": ["视神经萎缩","高眼压症"]}, ... }

    Each pair is sorted alphabetically so (A, B) and (B, A) collapse to one
    entry; insertion order of first occurrence is preserved.
    """
    if not isinstance(rel_map, dict):
        raise TypeError(f"diease_relation.json 顶层必须是 dict, 实际: {type(rel_map)}")
    unique: Set[Tuple[str, str]] = set()
    ordered: List[Tuple[str, str]] = []
    for name, info in rel_map.items():
        if not isinstance(info, dict):
            continue
        white_list = info.get("white_list", []) or []
        if not isinstance(white_list, list):
            continue
        for other in white_list:
            # Skip non-strings, blanks, and self-pairs.
            if not isinstance(other, str) or not other.strip() or name == other:
                continue
            pair = tuple(sorted((name, other)))
            if pair not in unique:
                unique.add(pair)
                ordered.append(pair)
    return ordered

# -------------------- Prompt 组装（避免 format 花括号冲突） --------------------

def _tpl_replace(tpl: str, **kwargs) -> str:
    """安全替换 {disease_cn}/{a_cn}/{b_cn}/{n}，不解释其它花括号"""
    out = tpl
    for k, v in kwargs.items():
        out = out.replace("{"+k+"}", str(v))
    return out

def _make_messages_single(disease_cn: str, n: int) -> List[Dict[str, Any]]:
    """Build the system+user chat messages for single-disease QA generation."""
    task = _tpl_replace(GEN_TASK_SINGLE_DISEASE, disease_cn=disease_cn, n=n)
    system_msg = {"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]}
    user_msg = {"role": "user", "content": [{"type": "text", "text": GENERATOR_HEADER + "\n" + task}]}
    return [system_msg, user_msg]

def _make_messages_relation(a_cn: str, b_cn: str, n: int) -> List[Dict[str, Any]]:
    """Build the system+user chat messages for disease-pair (relation) QA generation."""
    task = _tpl_replace(GEN_TASK_RELATION_PAIR, a_cn=a_cn, b_cn=b_cn, n=n)
    system_msg = {"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]}
    user_msg = {"role": "user", "content": [{"type": "text", "text": GENERATOR_HEADER + "\n" + task}]}
    return [system_msg, user_msg]

def _make_messages_reasoning(disease_cn: str, n: int) -> List[Dict[str, Any]]:
    """Build the system+user chat messages for reasoning-style QA generation."""
    task = _tpl_replace(GEN_TASK_REASONING, disease_cn=disease_cn, n=n)
    system_msg = {"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]}
    user_msg = {"role": "user", "content": [{"type": "text", "text": GENERATOR_HEADER + "\n" + task}]}
    return [system_msg, user_msg]

# -------------------- Pipeline --------------------

class QAAgentPipeline:
    """
    Two-role (generator/evaluator) QA pipeline.

    Each model is loaded exactly once in __init__. All three task types share
    the same best-effort flow:
      - empty generation -> retry via _chat (up to gen_retry attempts);
      - non-empty but unparseable -> raw text is returned for archival;
      - only successfully parsed QA pairs are scored and Top-K filtered.

    Bug fix: field extraction in _evaluate_items previously used
    ``a or b`` chaining, which silently discarded ``is_correct=False``
    (mapping it to a neutral 0.5 base instead of 0.0) and empty-string
    rationales; extraction is now explicit-None-safe.
    """

    def __init__(
        self,
        gen_model_id: str,
        eval_model_id: str,
        gen_temperature: float = 0.95,
        gen_top_p: float = 0.95,
        eval_temperature: float = 0.2,
        eval_top_p: float = 0.2,
        max_new_tokens: int = 2048,
        seed: int = 42,
        gen_retry: int = 3,
        log_path: str = "./experiments/logs/medqa_gen.log",
    ):
        random.seed(seed)
        # Generator: high temperature for diverse candidates.
        self.gen = MedGemmaAgent(
            model_id=gen_model_id, role="generator",
            temperature=gen_temperature, top_p=gen_top_p,
            max_new_tokens=max_new_tokens
        )
        # Evaluator: low temperature for stable scoring.
        self.eva = MedGemmaAgent(
            model_id=eval_model_id, role="evaluator",
            temperature=eval_temperature, top_p=eval_top_p,
            max_new_tokens=max_new_tokens
        )
        self.gen_retry = max(1, int(gen_retry))
        self.log_path = log_path

    # ---------- shared scoring ----------
    @staticmethod
    def _decision_base(decision: Any) -> float:
        """Map a decision value to a base score: accept-like -> 1.0, reject-like -> 0.0, else 0.5."""
        if isinstance(decision, bool):
            return 1.0 if decision else 0.0
        if isinstance(decision, str):
            d = decision.strip().lower()
            if d in ("accept", "true", "yes", "correct"):
                return 1.0
            if d in ("reject", "false", "no", "incorrect"):
                return 0.0
            # "uncertain"/"neutral"/"unknown" and anything unrecognized.
            return 0.5
        return 0.5

    @staticmethod
    def _confidence(*candidates: Any) -> float:
        """Return the first usable confidence among candidates ('85%' accepted); default 0.5."""
        for cand in candidates:
            if isinstance(cand, (float, int)):
                return float(cand)
            if isinstance(cand, str):
                try:
                    return float(cand.strip("%")) / (100 if "%" in cand else 1)
                except Exception:
                    pass
        return 0.5  # neutral default when nothing parseable

    def _evaluate_items(self, items: List[Dict[str, Any]], context: str) -> List[Dict[str, Any]]:
        """Score each QA pair with the evaluator and return them sorted by score (desc).

        Each kept item gets an "eval" dict (raw evaluator fields) and a
        "score" float = decision_base * clamp(confidence, 0, 1), rounded to 3dp.
        Items with empty/non-string question or answer are dropped.
        """
        result: List[Dict[str, Any]] = []
        for it in items:
            q, a = it.get("question", ""), it.get("answer", "")
            if not isinstance(q, str) or not isinstance(a, str) or not q.strip() or not a.strip():
                continue

            score_dict = self.eva.evaluate_qa(q, a, context=context)

            # ---- unified field extraction (None-safe: False/"" are preserved) ----
            decision = score_dict.get("decision")
            if decision is None:
                decision = score_dict.get("is_correct")
            rationale = score_dict.get("rationale")
            if rationale is None:
                rationale = score_dict.get("reason")
            parsed_from = score_dict.get("parsed_from")
            conf = score_dict.get("confidence")
            raw_text = score_dict.get("raw")
            numeric_score = score_dict.get("score")

            base = self._decision_base(decision)
            # Prefer explicit confidence, then a numeric "score" field.
            conf_val = self._confidence(conf, numeric_score)

            total_score = round(base * max(0.0, min(1.0, conf_val)), 3)

            it2 = dict(it)
            it2["eval"] = {
                "decision": decision,
                "score": numeric_score,
                "rationale": rationale,
                "is_correct": score_dict.get("is_correct"),
                "confidence": conf_val,
                "parsed_from": parsed_from,
                "raw": raw_text,
            }
            it2["score"] = total_score
            result.append(it2)

        # Best-scored first.
        result.sort(key=lambda x: x.get("score", 0.0), reverse=True)
        return result

    # ---------- shared generate + parse + retry ----------
    def _parse_with_retry(self, first_output: Any, build_messages) -> Tuple[List[Dict[str, Any]], Optional[str]]:
        """Parse the first generation; on a fully empty result, re-chat up to gen_retry times.

        Returns (parsed_items, raw_text); raw_text carries the last unparsed
        output (if any) so callers can archive it.
        """
        parsed, raw_text = _best_effort_to_items(first_output)
        if not parsed and not raw_text:
            for _ in range(self.gen_retry):
                raw_text = self.gen._chat(build_messages())
                parsed, salvaged_raw = _best_effort_to_items(raw_text)
                if parsed:
                    break
                raw_text = salvaged_raw or raw_text  # keep the last raw output
        return parsed, raw_text

    # ---------- three task types ----------
    def single_for_disease(self, disease_cn: str, n_generate: int, keep_top_k: int) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """
        Single-disease QA.

        Returns (parsed_items, raw_fallbacks):
          parsed_items  -- evaluated QA entries, best keep_top_k kept
          raw_fallbacks -- unparseable raw snippets, each shaped
                           {"type": "single", "disease": ..., "raw_text": "..."}
        """
        parsed, raw_text = self._parse_with_retry(
            self.gen.generate_single_disease(disease_cn, n=n_generate),
            lambda: _make_messages_single(disease_cn, n_generate),
        )
        if parsed:
            scored = self._evaluate_items(parsed, context=f"疾病={disease_cn}；任务=疾病单点")
            return scored[:keep_top_k], []
        if raw_text:
            append_log(self.log_path, f"[single][unparsed] {disease_cn} | snippet={raw_text[:200].replace(chr(10),' ')}")
            return [], [{"type": "single", "disease": disease_cn, "raw_text": raw_text}]
        return [], []

    def relation_for_pair(self, a_cn: str, b_cn: str, n_generate: int, keep_top_k: int) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """Confusable/related disease-pair QA; same return contract as single_for_disease."""
        parsed, raw_text = self._parse_with_retry(
            self.gen.generate_relation_pair(a_cn, b_cn, n=n_generate),
            lambda: _make_messages_relation(a_cn, b_cn, n_generate),
        )
        if parsed:
            scored = self._evaluate_items(parsed, context=f"疾病A={a_cn}；疾病B={b_cn}；任务=两病关系")
            return scored[:keep_top_k], []
        if raw_text:
            topic = f"{a_cn} vs {b_cn}"
            append_log(self.log_path, f"[relation][unparsed] {topic} | snippet={raw_text[:200].replace(chr(10),' ')}")
            return [], [{"type": "relation", "disease_a": a_cn, "disease_b": b_cn, "raw_text": raw_text}]
        return [], []

    def reasoning_for_disease(self, disease_cn: str, n_generate: int, keep_top_k: int) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """Reasoning-style QA for one disease; same return contract as single_for_disease."""
        parsed, raw_text = self._parse_with_retry(
            self.gen.generate_reasoning(disease_cn, n=n_generate),
            lambda: _make_messages_reasoning(disease_cn, n_generate),
        )
        if parsed:
            scored = self._evaluate_items(parsed, context=f"目标疾病={disease_cn}；任务=推演")
            return scored[:keep_top_k], []
        if raw_text:
            append_log(self.log_path, f"[reasoning][unparsed] {disease_cn} | snippet={raw_text[:200].replace(chr(10),' ')}")
            return [], [{"type": "reasoning", "disease": disease_cn, "raw_text": raw_text}]
        return [], []

# -------------------- CLI --------------------

def parse_args():
    """Build the argument parser for the QA-generation CLI and parse sys.argv."""
    parser = argparse.ArgumentParser(description="批量生成眼底医学问答（双角色Agent, 内容优先/格式宽容版）")

    # Input/output paths
    parser.add_argument("--all_cn_path", type=str,
                        default="../fundus-reasoner-dataprocess/configs/all_cn_name.json",
                        help="所有中文疾病/病灶清单（list[str]）")
    parser.add_argument("--relation_path", type=str,
                        default="../fundus-reasoner-dataprocess/configs/diease_relation.json",
                        help="疾病关系白名单映射（dict[str, {white_list: [...] }])")
    parser.add_argument("--out_dir", type=str, default="./experiments/dataset/generated_medqa",
                        help="输出目录")

    # Task on/off switches
    parser.add_argument("--disable_single", action="store_true", help="禁用 单病问答 生成")
    parser.add_argument("--disable_relation", action="store_true", help="禁用 两病关系 生成")
    parser.add_argument("--disable_reasoning", action="store_true", help="禁用 推演问答 生成")

    # Model checkpoints
    parser.add_argument("--gen_model_id", type=str, default="pretrained/medgemma4b")
    parser.add_argument("--eval_model_id", type=str, default="pretrained/medgemma4b")

    # Single-disease task
    parser.add_argument("--single_calls_per_disease", type=int, default=2,
                        help="每个疾病调用次数（增加多样性）")
    parser.add_argument("--single_n_generate", type=int, default=8,
                        help="每次生成多少条候选")
    parser.add_argument("--single_keep_top_k", type=int, default=6,
                        help="每次保留Top-K")

    # Relation-pair task
    parser.add_argument("--relation_calls_per_pair", type=int, default=2)
    parser.add_argument("--relation_n_generate", type=int, default=8)
    parser.add_argument("--relation_keep_top_k", type=int, default=6)

    # Reasoning task
    parser.add_argument("--reasoning_calls_per_disease", type=int, default=1)
    parser.add_argument("--reasoning_n_generate", type=int, default=6)
    parser.add_argument("--reasoning_keep_top_k", type=int, default=5)

    # Parsing / retry behavior
    parser.add_argument("--gen_retry", type=int, default=3, help="生成为空时的最大重试次数")
    parser.add_argument("--log_path", type=str, default="./experiments/logs/medqa_gen.log")

    # Output options
    parser.add_argument("--format", type=str, default="jsonl", choices=["jsonl", "json"],
                        help="解析成功条目的输出格式")
    parser.add_argument("--seed", type=int, default=42)

    return parser.parse_args()

def main():
    """CLI entry point: generate, evaluate, de-duplicate and persist the three QA sets.

    Fix: removed leftover debug truncation (`all_cn_list[:5]` / `pairs[:5]`)
    that silently limited every run to 5 diseases and 5 relation pairs.
    """
    args = parse_args()
    random.seed(args.seed)

    os.makedirs(args.out_dir, exist_ok=True)
    out_single = os.path.join(args.out_dir, "med_qa_single." + args.format)
    out_relation = os.path.join(args.out_dir, "med_qa_relation." + args.format)
    out_reasoning = os.path.join(args.out_dir, "med_qa_reasoning." + args.format)

    # Backup files for model outputs that could not be parsed into QA pairs.
    out_single_raw   = os.path.join(args.out_dir, "med_qa_single_raw_unparsed.jsonl")
    out_relation_raw = os.path.join(args.out_dir, "med_qa_relation_raw_unparsed.jsonl")
    out_reason_raw   = os.path.join(args.out_dir, "med_qa_reasoning_raw_unparsed.jsonl")

    # Load the disease list and relation whitelist.
    all_cn_list: List[str] = read_json(args.all_cn_path)
    if not isinstance(all_cn_list, list) or not all_cn_list:
        raise ValueError(f"all_cn_name.json 需要非空 list[str]，实际: {type(all_cn_list)}")

    rel_map: Dict[str, Any] = read_json(args.relation_path)
    pairs = build_relation_pairs(rel_map)

    # Both models are instantiated exactly once here.
    pipe = QAAgentPipeline(
        gen_model_id=args.gen_model_id,
        eval_model_id=args.eval_model_id,
        seed=args.seed,
        gen_retry=args.gen_retry,
        log_path=args.log_path
    )

    # Cross-task de-duplication keyed by md5(question || answer).
    seen: Set[str] = set()

    def _keep_unique(parsed: List[Dict[str, Any]], sink: List[Dict[str, Any]]) -> None:
        """Append to *sink* only QA pairs whose (question, answer) hash is unseen."""
        for it in parsed:
            h = hash_qa(it.get("question", ""), it.get("answer", ""))
            if h not in seen:
                seen.add(h)
                sink.append(it)

    # ---- single-disease QA ----
    single_items: List[Dict[str, Any]] = []
    single_raw_fallbacks: List[Dict[str, Any]] = []
    if not args.disable_single:
        for dis in tqdm(all_cn_list, desc="单病生成"):
            for _ in range(args.single_calls_per_disease):
                parsed, raw_fallbacks = pipe.single_for_disease(
                    disease_cn=dis,
                    n_generate=args.single_n_generate,
                    keep_top_k=args.single_keep_top_k
                )
                _keep_unique(parsed, single_items)
                single_raw_fallbacks.extend(raw_fallbacks)

    # ---- relation-pair QA ----
    relation_items: List[Dict[str, Any]] = []
    relation_raw_fallbacks: List[Dict[str, Any]] = []
    if not args.disable_relation:
        for a_cn, b_cn in tqdm(pairs, desc="两病关系生成"):
            for _ in range(args.relation_calls_per_pair):
                parsed, raw_fallbacks = pipe.relation_for_pair(
                    a_cn=a_cn, b_cn=b_cn,
                    n_generate=args.relation_n_generate,
                    keep_top_k=args.relation_keep_top_k
                )
                _keep_unique(parsed, relation_items)
                relation_raw_fallbacks.extend(raw_fallbacks)

    # ---- reasoning QA ----
    reasoning_items: List[Dict[str, Any]] = []
    reasoning_raw_fallbacks: List[Dict[str, Any]] = []
    if not args.disable_reasoning:
        for dis in tqdm(all_cn_list, desc="推演生成"):
            for _ in range(args.reasoning_calls_per_disease):
                parsed, raw_fallbacks = pipe.reasoning_for_disease(
                    disease_cn=dis,
                    n_generate=args.reasoning_n_generate,
                    keep_top_k=args.reasoning_keep_top_k
                )
                _keep_unique(parsed, reasoning_items)
                reasoning_raw_fallbacks.extend(raw_fallbacks)

    # ---- persist successfully parsed items ----
    def _save(items: List[Dict[str, Any]], path: str) -> None:
        """Write items in the user-selected output format (jsonl or json)."""
        if args.format == "jsonl":
            save_jsonl(items, path)
        else:
            save_json(items, path)

    if not args.disable_single:
        _save(single_items, out_single)
        print(f"[OK] 单病问答(已解析) 保存 {len(single_items)} 条 -> {out_single}")
    if not args.disable_relation:
        _save(relation_items, out_relation)
        print(f"[OK] 关系问答(已解析) 保存 {len(relation_items)} 条 -> {out_relation}")
    if not args.disable_reasoning:
        _save(reasoning_items, out_reasoning)
        print(f"[OK] 推演问答(已解析) 保存 {len(reasoning_items)} 条 -> {out_reasoning}")

    # ---- persist unparsed raw text for later manual repair ----
    if single_raw_fallbacks:
        save_jsonl(single_raw_fallbacks, out_single_raw)
        print(f"[OK] 单病问答(未解析原文) 保存 {len(single_raw_fallbacks)} 条 -> {out_single_raw}")
    if relation_raw_fallbacks:
        save_jsonl(relation_raw_fallbacks, out_relation_raw)
        print(f"[OK] 关系问答(未解析原文) 保存 {len(relation_raw_fallbacks)} 条 -> {out_relation_raw}")
    if reasoning_raw_fallbacks:
        save_jsonl(reasoning_raw_fallbacks, out_reason_raw)
        print(f"[OK] 推演问答(未解析原文) 保存 {len(reasoning_raw_fallbacks)} 条 -> {out_reason_raw}")

    print(f"[LOG] 过程日志: {args.log_path}")

if __name__ == "__main__":
    main()
