# -*- coding: utf-8 -*-
"""
@file: data_sampler.py

采样策略说明:
---------------
本脚本用于从原始分类标注的图片数据集中采样均衡高置信度子集（不包含正常眼底，label=25）。
采样流程如下：

1. **每一类（label ≠ 25）优先采样不超过 per_class_target（如30）张**，按置信度从高到低排序挑选；
   - 如果某类不足 per_class_target 张，则全部采完。
2. **统计所有类别已采集图片总数**，若少于 target_total（如1000/1200）：
   - 在剩余未被采样的图片池中（仍不含label 25），**随机补足**到目标数量。
   - 采样不会重复同一图片。
3. **采样结果处理与标注**：
   - 对采样图片左上角写置信度与公开标签信息，字体自定义（默认7.5号，白色）。
   - 文件名保持原名，全部保存至输出目录。
   - 生成`src2tar.json`，记录源路径与目标路径的映射。

使用方法示例见文件尾部。

依赖:
- tqdm  进度条
- pillow  图片处理

"""

import os
import json
import random
from collections import defaultdict
from tqdm import tqdm
from utils.data_sample import draw_label_info_on_image

def sample_and_label_images(
    quality_json_path,
    label_map_json_path,
    output_dir,
    font_path='./SarasaMonoSC-Regular.ttf',
    font_size=7.5,
    target_total=1200,
    per_class_target=30
):
    """Sample a balanced, high-confidence subset of labeled images and annotate them.

    Selection strategy:
      1. For every class except label 25 (normal fundus), take up to
         ``per_class_target`` images, highest confidence first.
      2. If the total is still below ``target_total``, top it up randomly from
         the remaining pool — high-confidence (> 0.5) candidates first, then
         the rest. No image is ever sampled twice.
      3. Each sampled image gets confidence/label text drawn on it (via
         ``draw_label_info_on_image``) and is written to ``output_dir`` under
         its original file name; a ``src2tar.json`` mapping is written too.

    Parameters:
        quality_json_path: JSON mapping image path -> {'label', 'confidence', ...}.
        label_map_json_path: JSON mapping label name -> label id.
        output_dir: directory that receives annotated images and src2tar.json.
        font_path: TTF font used for the overlay text.
        font_size: overlay font size.
        target_total: desired total number of sampled images.
        per_class_target: per-class cap for the confidence-ranked first pass.
    """
    with open(quality_json_path, 'r', encoding='utf-8') as f:
        path2info = json.load(f)
    with open(label_map_json_path, 'r', encoding='utf-8') as f:
        label_map = json.load(f)
    # Invert name -> id so ids can be rendered as human-readable label names.
    id2label = {str(v): k for k, v in label_map.items()}

    # Group candidates by class, excluding label 25 (normal fundus).
    label2items = defaultdict(list)
    for img_path, info in path2info.items():
        label = info['label']
        if label == 25:
            continue
        label2items[str(label)].append((img_path, info['confidence'], info))

    selected = []
    selected_set = set()

    # Phase 1: per class, take up to per_class_target items, confidence descending.
    for items in label2items.values():
        items.sort(key=lambda x: -x[1])
        for item in items[:per_class_target]:
            selected.append(item)
            selected_set.add(item[0])

    # Phase 2: top up to target_total from the untouched remainder.
    # NOTE: the pool is built exactly once here — the previous version added
    # the per-class leftovers to the pool twice, which let the same image be
    # appended to `selected` twice during the random fill.
    if len(selected) < target_total:
        unused = [
            item
            for items in label2items.values()
            for item in items
            if item[0] not in selected_set
        ]
        # Prefer high-confidence (> 0.5) fillers, then fall back to the rest.
        high_conf = [it for it in unused if it[1] > 0.5]
        low_conf = [it for it in unused if it[1] <= 0.5]
        for pool in (high_conf, low_conf):
            random.shuffle(pool)
            for item in pool:
                if len(selected) >= target_total:
                    break
                if item[0] in selected_set:  # defensive: never sample twice
                    continue
                selected.append(item)
                selected_set.add(item[0])

    # Annotate each sampled image and save it under its original file name.
    os.makedirs(output_dir, exist_ok=True)
    src2tar = {}
    for img_path, conf, info in tqdm(selected, desc="处理采样图片"):
        tar_path = os.path.join(output_dir, os.path.basename(img_path))
        draw_label_info_on_image(
            img_path, conf, info['label'], id2label, tar_path,
            font_path=font_path, font_size=font_size
        )
        src2tar[img_path] = tar_path

    # Persist the source -> target path mapping next to the images.
    with open(os.path.join(output_dir, 'src2tar.json'), 'w', encoding='utf-8') as f:
        json.dump(src2tar, f, ensure_ascii=False, indent=2)

# Usage example
if __name__ == "__main__":
    sampling_options = dict(
        font_path='./SarasaMonoSC-Regular.ttf',
        font_size=1.5,
        target_total=1300,
        per_class_target=30,
    )
    sample_and_label_images(
        './outputs/path2quality.json',
        './experiments/data_selector/diag_label_mapping.json',
        '/home/ma-user/work/Dataset/med_pub_data_sample',
        **sampling_options,
    )
