'''
生成sft 数据集脚本
1. 读取异常样本，过滤黑名单
2. 异常样本划分为train/test
3. 从公开收集正常样本，划分为train/test
4. 生成多模态数据

对于原始的标注，做进一步细化description，参考cold_start/description_prompt/med_description.json


'''
import json
import os
import random
import shutil
from collections import Counter

from tqdm import tqdm

# ========= Configuration =========
# NOTE(review): 'colde_start' looks like a typo for 'cold_start' (the module
# docstring says cold_start) — paths kept as-is in case the on-disk layout
# really uses 'colde'; confirm before renaming.
description_json_path = './colde_start/description_prompt/med_description.json'
data_path = '/home/zhangpinglu/data0/gy/Dataset/aier_processed/'
annotation_name = 'annotations.json'
# NOTE(review): image_dir is defined but never used below — confirm whether
# annotation image paths are meant to be joined against it.
image_dir = os.path.join(data_path, 'images')
black_list_label = ['不可用', '其他眼底病', '进一步检查']  # labels excluded from the abnormal set
med_prompt_path = './colde_start/description_prompt/question_prompt.txt'
test_ratio = 0.1
normal_ratio = 0.05   # normal/abnormal sample ratio; 1 means a 50/50 mix
pub_data_dir = '/home/zhangpinglu/data0/gy/Dataset/public_processed'
save_dir = './experiments/aier_colde_start_data'
# Recreate save_dir from scratch. shutil.rmtree is portable and shell-free,
# unlike the previous `os.system('rm -rf ...')`, and also removes dotfiles
# that a shell `*` glob would miss.
if os.path.isdir(save_dir):
    shutil.rmtree(save_dir)
os.makedirs(save_dir, exist_ok=True)

img_size = (224, 224)  # global (width, height) used to de-normalize bbox coordinates

# ========= 工具函数 =========
def construct_prompts(data, description_dict, prompt_template_str, size=None):
    """Build the (question, answer) pair for one annotated abnormal image.

    Args:
        data: annotation dict with an optional "lesions" list (each lesion has
            a "name" and a normalized [x1, y1, x2, y2] "bbox") and an
            "image_label" diagnosis string.
        description_dict: lesion name -> textual description lookup.
        prompt_template_str: question template; returned stripped.
        size: (width, height) used to de-normalize bbox coordinates. Defaults
            to the module-level ``img_size`` for backward compatibility.

    Returns:
        (question, answer) where answer wraps one
        ``<region>...</region> <description>...</description>`` line per
        lesion inside <thinking> tags, followed by an <answer> diagnosis line.
    """
    if size is None:
        size = img_size  # fall back to the module-level default
    thinking_list = []
    lesions = data.get("lesions")
    if lesions:
        for lesion in lesions:
            # Unknown lesion names fall back to "未见异常".
            # NOTE(review): falling back to the raw lesion name might be more
            # informative than "no abnormality" — confirm intent.
            desc = description_dict.get(lesion["name"], "未见异常")
            # bbox is normalized; x coords scale by width (size[0]),
            # y coords by height (size[1]).
            x1, y1, x2, y2 = (
                int(round(coord * size[axis % 2]))
                for axis, coord in enumerate(lesion["bbox"])
            )
            thinking_list.append(
                f"<region>({x1}, {y1}, {x2}, {y2})</region> "
                f"<description>{desc}</description>"
            )
    else:
        # No lesion boxes: emit a single whole-image "nothing abnormal" region.
        thinking_list.append(
            f"<region>(0, 0, {size[0]}, {size[1]})</region> "
            "<description>未见异常</description>"
        )

    thinking = "<thinking>\n" + "\n".join(thinking_list) + "\n</thinking>"
    label = data.get("image_label", "未见异常")
    answer = f"{thinking}\n<answer>综上，本图片诊断为{label}</answer>"
    return prompt_template_str.strip(), answer

def construct_normal_prompt(prompt_template_str, size=None):
    """Build the (question, answer) pair for a normal-fundus image.

    Args:
        prompt_template_str: question template; returned stripped.
        size: (width, height) of the full-image region. Defaults to the
            module-level ``img_size`` for backward compatibility.

    Returns:
        (question, answer) with a single whole-image "未见异常" region and a
        fixed "正常眼底" diagnosis in the <answer> line.
    """
    if size is None:
        size = img_size  # fall back to the module-level default
    region = f"<region>(0, 0, {size[0]}, {size[1]})</region>"
    thinking = f"<thinking>\n{region} <description>未见异常</description>\n</thinking>"
    answer = f"{thinking}\n<answer>综上，本图片诊断为正常眼底</answer>"
    return prompt_template_str.strip(), answer

def is_black_label(label):
    """Return True if *label* is one of the blacklisted annotation labels."""
    return any(label == blocked for blocked in black_list_label)

def is_normal_diag(diag_text):
    """Return True when the stripped diagnosis text is one of the six
    annotation phrases that this pipeline treats as a normal fundus."""
    normal_phrases = {
        "retinal vasculature remain normal",
        "normal orange-red fundus with red branched curving vasculature entering the pink optic disc with sharp margins",
        "while the optic disc and retinal vasculature remain normal",
        "normal orange-red fundus with red branched curving vasculature entering the pink optic disc with sharp margins and a",
        "physiological finding",
        "normal",
    }
    return diag_text.strip() in normal_phrases

# ========= Data loading =========
with open(os.path.join(data_path, annotation_name), 'r', encoding='utf-8') as f:
    annotations = json.load(f)
with open(med_prompt_path, 'r', encoding='utf-8') as f:
    med_prompt_str = f.read()
with open(description_json_path, 'r', encoding='utf-8') as f:
    med_descriptions = json.load(f)


# ========= 1. Abnormal-sample preprocessing: drop blacklisted labels =========
valid_imgs = []          # abnormal images that keep at least one lesion box
empty_lesion_imgs = []   # abnormal images annotated without lesions (logged separately)
for img_name, info in annotations.items():
    label = info.get("image_label", "")
    if is_black_label(label):
        continue
    lesions = info.get("lesions", [])
    # `not lesions` already covers both None and the empty list; the previous
    # extra `len(lesions) == 0` check was redundant.
    if not lesions:
        empty_lesion_imgs.append(img_name)
        continue
    valid_imgs.append(img_name)

print(f"过滤后异常图片数: {len(valid_imgs)}")
print(f"无lesion异常图片数: {len(empty_lesion_imgs)}")
# Persist the lesion-less image names for later manual inspection.
with open(os.path.join(save_dir, 'empty_lesion.txt'), 'w', encoding='utf-8') as f:
    for name in empty_lesion_imgs:
        f.write(name + '\n')

# ========= 2. 异常样本train/test划分 =========
def split_train_test(img_list, test_ratio=0.1, seed=42):
    """Deterministically split *img_list* into (train, test) name sets.

    The first ``int(len * test_ratio)`` items of the shuffled list become the
    test set; the remainder is train. Uses a local ``random.Random(seed)``
    instead of reseeding the module-global RNG, so calling this helper no
    longer clobbers global random state (the shuffle sequence itself is
    identical to ``random.seed`` + ``random.shuffle``).

    Returns:
        (train_set, test_set) as disjoint sets covering all of *img_list*.
    """
    rng = random.Random(seed)
    items = list(img_list)
    rng.shuffle(items)
    n_test = int(len(items) * test_ratio)
    return set(items[n_test:]), set(items[:n_test])

train_images, test_images = split_train_test(valid_imgs, test_ratio=test_ratio)
print(f"异常样本划分后train: {len(train_images)}, test: {len(test_images)}")
# Map every abnormal image name to its split for the generation pass below.
image_to_split = {name: 'train' for name in train_images}
image_to_split.update((name, 'test') for name in test_images)

# ========= 3. Collect normal samples from the public datasets =========
normal_img_dict = {}  # "dataset:img_name" -> standardized sample dict
for ds_name in os.listdir(pub_data_dir):
    ds_root = os.path.join(pub_data_dir, ds_name)
    ds_ann_path = os.path.join(ds_root, 'annotations.json')
    if not os.path.isfile(ds_ann_path):
        continue
    with open(ds_ann_path, 'r', encoding='utf-8') as fh:
        ds_annotations = json.load(fh)
    for name, meta in ds_annotations.items():
        diagnosis_text = meta.get("diagnosis", {}).get("text", "").strip()
        if is_normal_diag(diagnosis_text):
            # Normalize the structure so all public sets share one schema.
            normal_img_dict[f"{ds_name}:{name}"] = {
                "image_path": os.path.join(ds_root, meta['image_path']),
                "image_label": "正常眼底",
            }
print(f"收集到正常样本数: {len(normal_img_dict)}")

# ========= 4. Subsample and split the normal samples =========
num_abnormal = len(valid_imgs)
num_normal = int(num_abnormal * normal_ratio)  # cap normals relative to abnormals
random.seed(42)
normal_img_names = list(normal_img_dict.keys())
random.shuffle(normal_img_names)
del normal_img_names[num_normal:]  # keep only the first num_normal names

normal_train_imgs, normal_test_imgs = split_train_test(normal_img_names, test_ratio=test_ratio)
print(f"正常样本划分后train: {len(normal_train_imgs)}, test: {len(normal_test_imgs)}")

# ========= 5. Build the multimodal SFT records =========
multimodal_data_train, multimodal_data_test = [], []

def _make_record(question, answer, image_path):
    """Assemble one multimodal SFT record in the instruction/input/output schema."""
    return {
        "instruction": "<image>" + question,
        "input": "",
        "output": answer,
        "images": [image_path],
    }

# a. abnormal samples
for img_name in tqdm(valid_imgs, desc='生成异常多模态数据'):
    data = annotations[img_name]
    question, answer = construct_prompts(data, med_descriptions, med_prompt_str)
    record = _make_record(question, answer, data["image_path"])
    if image_to_split[img_name] == 'train':
        multimodal_data_train.append(record)
    else:
        multimodal_data_test.append(record)

# b. normal samples
for key in tqdm(normal_img_names, desc='生成正常多模态数据'):
    sample = normal_img_dict[key]
    question, answer = construct_normal_prompt(med_prompt_str)
    record = _make_record(question, answer, sample["image_path"])
    if key in normal_train_imgs:
        multimodal_data_train.append(record)
    else:
        multimodal_data_test.append(record)

# Shuffle the merged training data in place.
random.shuffle(multimodal_data_train)

print(f"最终合并train样本数: {len(multimodal_data_train)}, test样本数: {len(multimodal_data_test)}")
# ========= 6. Persist the datasets and the split manifest =========
def _dump_json(filename, obj):
    """Write *obj* as pretty-printed UTF-8 JSON under save_dir."""
    with open(os.path.join(save_dir, filename), 'w', encoding='utf-8') as fh:
        json.dump(obj, fh, ensure_ascii=False, indent=2)

# 6.1 split manifest covering both normal and abnormal images
_dump_json("image_split.json", {
    "train": list(train_images) + list(normal_train_imgs),
    "test": list(test_images) + list(normal_test_imgs),
})
# 6.2 main SFT payloads
_dump_json("aier_cold_start_train.json", multimodal_data_train)
_dump_json("aier_cold_start_test.json", multimodal_data_test)
# 6.3 dataset_info: both entries share the same column mapping
_columns = {
    "prompt": "instruction",
    "query": "input",
    "response": "output",
    "images": "images",
}
_dump_json("dataset_info.json", {
    name: {"file_name": f"{name}.json", "columns": dict(_columns)}
    for name in ("aier_cold_start_train", "aier_cold_start_test")
})

print("所有数据集生成完毕。")
