import json

# ==== Input/output path configuration ====
GROUP_JSON = "user_data/json_data/group_by_capt.json"     # shaped like [{"Caption": "...", "Sources": ["...","..."]}, ...] or [{"Sources": [...]}]
CANDI_JSON = "user_data/json_data/retrieval_txt.json"     # shaped like [{"image": "...", "topn": [{"rank":1,"caption":"..."}, ...]}, ...]
OUTPUT_JSON = "user_data/json_data/union_top10.json"

# ==== 辅助函数 ====
def normalize(s: str) -> str:
    """Canonicalize *s* for comparison: trim, collapse inner whitespace, lowercase."""
    text = s or ""
    tokens = text.split()
    return " ".join(tokens).lower()

def top10_captions(entry, top_k=10):
    """Collect captions ranked <= top_k from one candi.json record.

    Generalized: the cutoff is now a parameter (default 10, so existing
    callers are unaffected).

    Args:
        entry: One record shaped like
            {"image": ..., "topn": [{"rank": int, "caption": str}, ...]}.
        top_k: Inclusive maximum rank to keep.

    Returns:
        (norm_set, norm2orig): the set of normalized captions, and a mapping
        from each normalized caption back to the first original spelling seen
        (a stable representative). Empty/blank captions are dropped.
    """
    norm2orig = {}
    for item in entry.get("topn", []):
        rank = item.get("rank")
        # Items with no rank, or ranked past the cutoff, are ignored.
        if rank is None or rank > top_k:
            continue
        cap = item.get("caption", "")
        n = normalize(cap)
        if n:
            # setdefault keeps the first original text seen per normalized form.
            norm2orig.setdefault(n, cap)
    return set(norm2orig.keys()), norm2orig

# ==== Load input files ====
with open(GROUP_JSON, "r", encoding="utf-8") as f:
    groups = json.load(f)

with open(CANDI_JSON, "r", encoding="utf-8") as f:
    candi = json.load(f)

# Index candi as image -> (normalized caption set, normalized -> original map)
candi_by_image = {}
for record in candi:
    image_key = record.get("image")
    if image_key:
        # top10_captions already returns the (set, mapping) pair we store.
        candi_by_image[image_key] = top10_captions(record)

def extract_sources(g):
    """Return the group's source list, accepting either "Sources" or "sources" keys.

    Non-dict records and records with neither key yield an empty list.
    """
    if not isinstance(g, dict):
        return []
    for key in ("Sources", "sources"):
        if key in g:
            return g[key]
    return []

# ==== Compute the per-group union of top-10 captions ====
output = []
for group in groups:
    group_sources = extract_sources(group)
    if not group_sources:
        continue

    union_norm = set()
    merged_norm2orig = {}

    for source in group_sources:
        entry = candi_by_image.get(source)
        if entry is None:
            # A source missing from candi.json contributes nothing to the union.
            continue

        norm_set, norm2orig = entry
        # Merge the original-text map, recording each key only on first sight
        # so the representative spelling stays stable.
        for norm_key, original_text in norm2orig.items():
            merged_norm2orig.setdefault(norm_key, original_text)

        union_norm.update(norm_set)  # union, not intersection, by design

    # Map the normalized union back to original captions, sorted for
    # deterministic output (an empty set naturally yields an empty list).
    union_caps = [merged_norm2orig[key] for key in sorted(union_norm)]

    output.append({
        "sources": group_sources,
        "union": union_caps
    })

# ==== Write the result ====
with open(OUTPUT_JSON, "w", encoding="utf-8") as f:
    json.dump(output, f, ensure_ascii=False, indent=2)

print(f"Done. Wrote {OUTPUT_JSON} with {len(output)} groups.")
