import json
import os
from tqdm import tqdm
from loguru import logger
import pandas as pd
import argparse
# Path to the question-prompt template that is prepended to every sample.
# NOTE(review): 'colde_start' looks like a typo for 'cold_start' — confirm this path exists on disk.
med_prompt_path = './colde_start/description_prompt/question_prompt.txt'
# Filename prefix for the emitted parquet files (see main()).
save_prefix="med_pub_rl"
# Prefix prepended to the ground-truth answer string
# (Chinese: "In summary, this image is diagnosed as").
system_answer_prefix="综上，本图片诊断为"
# Read the prompt template once at import time; "<image>" is the image
# placeholder token expected by the downstream multimodal pipeline.
with open(med_prompt_path, 'r', encoding='utf-8') as f:
    med_prompt_str = "<image>"+f.read()
    
def construct_answer(cleaned_label):
    """Normalize a cleaned label into a single answer string.

    A plain string is returned unchanged; a list of labels is joined
    with ", ". Any other type is rejected with a ValueError.
    """
    if isinstance(cleaned_label, str):
        return cleaned_label
    if isinstance(cleaned_label, list):
        return ", ".join(cleaned_label)
    raise ValueError(f"unexpected data type {type(cleaned_label)} for {cleaned_label} in construct answer for rl dataset")

def load_json(path):
    """Read the UTF-8 JSON document at *path* and return the parsed object."""
    with open(path, encoding='utf-8') as fp:
        return json.load(fp)

def make_map_fn(split, img2record):
    def process_fn(example, idx):
        image_path = example
        record = img2record.get(image_path)
        if record is None:
            logger.warning(f"Image path {image_path} not found in cleaned_dataset. Skipping.")
            return None
        answer_str = construct_answer(record["cleaned_label"])
        data = {
            "data_source": "fundus_qa",
            "prompt": [
                {
                    "role": "user",
                    "content": med_prompt_str
                }
            ],
            # ======== 这里适配 fetch_image，images 字段变为 list of dicts =========
            "images": [{"image": record["image_path"]}],  # 确保是 [{"image": ...}]
            # =====================================================================
            "ability": "diagnosis",
            "reward_model": {
                "style": "rule",
                "ground_truth": system_answer_prefix+ answer_str
            },
            "extra_info": {
                "split": split,
                "index": idx
            }
        }
        return data
    return process_fn

def main():
    """Convert the cleaned fundus dataset into train/test parquet files for RL."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--cleaned_json", default="/data0/zhangpinglu/gy/code/FundusReasoner/experiments/med_pub_data/cleaned_img_label.json")
    parser.add_argument("--split_json", default="/data0/zhangpinglu/gy/code/FundusReasoner/experiments/med_pub_data/image_split.json")
    parser.add_argument("--output_dir", default="/data0/zhangpinglu/gy/code/FundusRL/experiments/med_pub_rl_data", help="Directory to save the processed data")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    logger.add(os.path.join(args.output_dir, "preprocess.log"), rotation="1 MB")

    cleaned_dataset = load_json(args.cleaned_json)
    image_split = load_json(args.split_json)

    # Optional dataset-root rewrite: the data was moved between servers,
    # so stale absolute paths are rebased onto the new mount point.
    ori_dir = '/home/guoyi/Dataset'
    tar_dir = '/home/zhangpinglu/data0/gy/Dataset'
    for record in cleaned_dataset:
        record["image_path"] = record["image_path"].replace(ori_dir, tar_dir)
    for split_name, paths in image_split.items():
        image_split[split_name] = [p.replace(ori_dir, tar_dir) for p in paths]

    # Index records by image path for O(1) lookup in the map function.
    img2record = {record["image_path"]: record for record in cleaned_dataset}

    for split in ('train', 'test'):
        paths = image_split[split]
        logger.info(f"Processing split={split}, num_samples={len(paths)}")

        process = make_map_fn(split, img2record)
        rows = []
        for sample_idx, img_path in enumerate(paths):
            sample = process(img_path, sample_idx)
            if sample is not None:
                rows.append(sample)

        output_path = os.path.join(args.output_dir, f"{save_prefix}_{split}.parquet")
        logger.info(f"Saving {split} split ({len(rows)} samples) to {output_path}")
        pd.DataFrame(rows).to_parquet(output_path, index=False)
        logger.success(f"{split} split saved to {output_path}")

if __name__ == "__main__":
    main()