import json
import os
from tqdm import tqdm
from loguru import logger
import pandas as pd
import argparse
# Detailed chain-of-thought diagnosis prompt (Chinese): instructs the model to
# emit <thinking> blocks of <region>(x1, y1, x2, y2)</region> +
# <description>...</description> pairs (pixel coords in 0~224) followed by an
# <answer> diagnosis, with worked abnormal/normal examples.
# NOTE(review): not referenced anywhere in this file (make_map_fn uses
# diagnosis_prompt_short) — presumably kept for experimentation; confirm
# before removing.
diagnosis_prompt_long = """
    <image>你是一个经验丰富的眼科医生。你的任务是对给定的眼底照片进行疾病分析与诊断，并详细描述推理过程，以“思维链”形式输出。请严格按照以下结构输出：

<thinking>
<region>(x1, y1, x2, y2)</region>
<description>对上述坐标区域内的可疑病变、异常、或诊断相关特征进行专业描述，可以表达为“看到…，类似…表现，疑似…，高度怀疑…，符合…，可以确诊为…”等。如果没有发现异常，也需输出一组覆盖全图的坐标和‘未见明显异常’等描述。</description>
<!-- 如有多个区域可重复<region>与<description>对 -->
...
</thinking>
<answer>根据你的观察和推理，给出疾病的诊断结论。可以是“未见异常”或具体疾病，并说明诊断依据。</answer>

【规定】
1. 所有坐标均为格式(x1, y1, x2, y2)，分别为左上点和右下点的像素坐标，范围0~224。
2. 无论是否异常，输出时都必须至少包含一组坐标和描述。无异常时，统一用(0, 0, 224, 224)，描述为“未见明显异常”或“未见异常表现”。
3. 每个<region>后面必须有一个<description>。
4. 输出严格遵循上述格式，思维链可包含多组区域和描述。
5. 诊断结论写在<answer>标签内。

【异常示例】
<thinking>
<region>(12, 34, 88, 120)</region>
<description>该区域可见多个小圆形白色渗出物，分布在视网膜浅层，疑似硬性渗出，类似糖尿病视网膜病变表现。</description>
<region>(100, 150, 180, 210)</region>
<description>黄斑区轻度隆起，反光增强，高度怀疑黄斑水肿。</description>
</thinking>
<answer>符合非增殖期糖尿病视网膜病变伴黄斑水肿，诊断依据为视网膜硬性渗出及黄斑水肿表现。</answer>

【无异常示例】
<thinking>
<region>(0, 0, 224, 224)</region>
<description>整个眼底未见出血、渗出、微血管瘤或其他异常表现，视盘及黄斑区结构清晰。</description>
</thinking>
<answer>未见明显异常，提示眼底健康。</answer>

请根据你观察到的内容，输出推理过程（思维链）与最终诊断结论。
"""

# Compact variant of the diagnosis prompt (Chinese), same output contract:
# <thinking> with <region>/<description> pairs (coords 0~224, fallback
# (0,0,224,224) + “未见异常” when nothing is found) and a final <answer>.
# This is the prompt actually embedded in each RL sample by make_map_fn.
diagnosis_prompt_short = """
<image>你是一名经验丰富的眼科医生，任务是对给定眼底照片进行分析诊断，请严格用如下格式输出：

<thinking>
<region>(x1, y1, x2, y2)</region>
<description>描述该区域内的可疑病变或异常；若无异常则坐标为(0,0,224,224)并写“未见异常”。</description>
<!-- 可多组 -->
</thinking>
<answer>根据观察，给出最终诊断结论和依据，或写“未见异常”。</answer>

【所有坐标为(x1,y1,x2,y2)，取值0~224】
【无异常统一输出(0,0,224,224)和“未见异常”】
【每组region后紧跟description】

根据观察内容输出。

"""
def construct_answer(cleaned_label):
    """Normalize a cleaned label into a single ground-truth answer string.

    A plain string passes through unchanged; a list of label strings is
    joined with ", ".

    Raises:
        ValueError: if cleaned_label is neither a str nor a list.
    """
    if isinstance(cleaned_label, str):
        return cleaned_label
    if isinstance(cleaned_label, list):
        return ", ".join(cleaned_label)
    raise ValueError(f"unexpected data type {type(cleaned_label)} for {cleaned_label} in construct answer for rl dataset")

def load_json(path):
    """Read a UTF-8 encoded JSON file at *path* and return the parsed object."""
    with open(path, encoding="utf-8") as fh:
        return json.load(fh)

def make_map_fn(split, img2record):
    """Build a mapper turning one image path into an RL training sample.

    Args:
        split: Split name ("train" or "test"), recorded in extra_info.
        img2record: Mapping from image path to its cleaned-label record.

    Returns:
        A callable (example, idx) -> dict | None. It yields None (after a
        warning) when the image path has no entry in img2record.
    """
    def process_fn(example, idx):
        image_path = example
        matched = img2record.get(image_path)
        if matched is None:
            logger.warning(f"Image path {image_path} not found in cleaned_dataset. Skipping.")
            return None
        return {
            "data_source": "fundus_qa",
            "prompt": [{"role": "user", "content": diagnosis_prompt_short}],
            # fetch_image compatibility: "images" must be a list of
            # {"image": path} dicts, not a bare list of paths.
            "images": [{"image": matched["image_path"]}],
            "ability": "diagnosis",
            "reward_model": {
                "style": "rule",
                "ground_truth": construct_answer(matched["cleaned_label"]),
            },
            "extra_info": {"split": split, "index": idx},
        }
    return process_fn

def main():
    """CLI entry point: build train/test RL parquet files from cleaned labels.

    Reads the cleaned image->label JSON and the train/test split JSON, maps
    every split image to a prompt/ground-truth sample, and writes one parquet
    file per split into --output_dir.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--cleaned_json", default="/root/autodl-tmp/FundusReasoner/experiments/med_pub_data/cleaned_img_label.json")
    parser.add_argument("--split_json", default="/root/autodl-tmp/FundusReasoner/experiments/med_pub_data/image_split.json")
    parser.add_argument("--output_dir", default="/root/autodl-tmp/FundusReasoner/experiments/med_pub_rl_data", help="Directory to save the processed data")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    logger.add(os.path.join(args.output_dir, "preprocess.log"), rotation="1 MB")

    cleaned_dataset = load_json(args.cleaned_json)
    image_split = load_json(args.split_json)

    # Index cleaned records by image path for O(1) lookup per sample.
    img2record = {entry["image_path"]: entry for entry in cleaned_dataset}

    for split in ("train", "test"):
        split_image_list = image_split[split]
        logger.info(f"Processing split={split}, num_samples={len(split_image_list)}")

        mapper = make_map_fn(split, img2record)
        # Drop entries whose image path had no cleaned record (mapper -> None).
        processed_data = [
            sample
            for sample in (mapper(p, i) for i, p in enumerate(split_image_list))
            if sample is not None
        ]

        output_path = os.path.join(args.output_dir, f"med_pub_rl_{split}.parquet")
        logger.info(f"Saving {split} split ({len(processed_data)} samples) to {output_path}")
        pd.DataFrame(processed_data).to_parquet(output_path, index=False)
        logger.success(f"{split} split saved to {output_path}")

# Run the preprocessing pipeline only when executed as a script.
if __name__ == "__main__":
    main()