#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
匹配三个星表并检测变量候选
1) 从 images_u.list 中读取文件路径，替换为 CalMag 星表
2) 用 astropy.match_coordinates_sky 做三重匹配（A-B, A-C）
3) 检测显著性变量候选（至少两对比较满足条件）
4) 输出统计信息、保存匹配结果与 DS9 region 文件
5) 为所有候选生成 light curve URL（对 + 等字符进行 URL 编码）
6) 所有 .reg、.fits、.csv、.txt 统一保存到 lightcurves/
"""

import os, re, argparse
import numpy as np
from typing import List, Optional, Tuple
from urllib.parse import quote
from astropy.table import Table, join
from astropy.coordinates import SkyCoord, match_coordinates_sky
import astropy.units as u

# ---- Candidate column names, tried in priority order ----
RA_CANDS   = ["ALPHA_J2000", "RA", "ra"]
DEC_CANDS  = ["DELTA_J2000", "DEC", "dec"]
MAG_PREFS  = ["CALMAG_22", "CALMAG_12", "MAG_AUTO", "mag"]
MERR_PREFS = ["MAGERR_AUTO_S", "MAGERR_AUTO", "MAGERR", "magerr"]
TIME_CANDS = ["MJD", "HJD", "BJD", "JD", "MJD-OBS", "obs_mjd"]
BAND_CANDS = ["FILTER", "filter", "BAND", "band"]

SIG_SYS_DEFAULT = 0.02  # default systematic magnitude error, added in quadrature per catalog
LC_BASE_URL = "http://192.168.16.70:8082/light_curve_V20250303"  # light-curve service endpoint
OUT_DIR = "lightcurves"  # every output file (.reg/.fits/.csv/.txt) is written here

# ---------------- 小工具 ----------------

def _ensure_outdir() -> None:
    """Make sure the output directory exists (idempotent)."""
    os.makedirs(OUT_DIR, exist_ok=True)

def _p(*names: str) -> str:
    """Join *names* onto the output directory and return the resulting path."""
    return os.path.join(OUT_DIR, *names)

def _coerce_object_str_columns(tab: Table, width: int = 256) -> Table:
    """
    Return a copy of *tab* in which every dtype=object column has been cast
    to fixed-width Unicode (``U{width}``) so the table can be written to FITS.
    An empty table is returned unchanged.
    """
    if len(tab) == 0:
        return tab
    result = tab.copy()
    object_cols = [c for c in result.colnames if result[c].dtype.kind == 'O']
    for col in object_cols:
        result[col] = np.array(result[col], dtype=f"U{width}")
    return result

# ---------------- RA/DEC 格式化 ----------------

def _format_ra_hms(ra_deg: float, sec_prec: int = 4) -> str:
    """RA(度) -> H:MM:SS.ssss"""
    ra_deg = ra_deg % 360.0
    total_seconds = (ra_deg / 15.0) * 3600.0
    h = int(total_seconds // 3600)
    m = int((total_seconds - h * 3600) // 60)
    s = total_seconds - h * 3600 - m * 60
    s_rounded = round(s, sec_prec)
    if s_rounded >= 60.0:
        s_rounded = 0.0
        m += 1
        if m >= 60:
            m = 0
            h += 1
            if h >= 24:
                h = 0
    return f"{h}:{m:02d}:{s_rounded:0{2 + 1 + sec_prec}.{sec_prec}f}"

def _format_dec_dms(dec_deg: float, sec_prec: int = 3) -> str:
    """Dec(度) -> ±DD:MM:SS.sss"""
    sign = '+' if dec_deg >= 0 else '-'
    x = abs(dec_deg)
    total_seconds = x * 3600.0
    d = int(total_seconds // 3600)
    m = int((total_seconds - d * 3600) // 60)
    s = total_seconds - d * 3600 - m * 60
    s_rounded = round(s, sec_prec)
    if s_rounded >= 60.0:
        s_rounded = 0.0
        m += 1
        if m >= 60:
            m = 0
            d += 1
    return f"{sign}{d:02d}:{m:02d}:{s_rounded:0{2 + 1 + sec_prec}.{sec_prec}f}"

def _build_lightcurve_url(ra_deg: float, dec_deg: float) -> str:
    """Build the fully percent-encoded light-curve URL for a sky position."""
    # safe='' percent-encodes every reserved character (':' and '+' included);
    # unreserved characters (letters, digits, '-', '.', '_', '~') pass through.
    ra_enc = quote(_format_ra_hms(float(ra_deg), sec_prec=4), safe='')
    dec_enc = quote(_format_dec_dms(float(dec_deg), sec_prec=3), safe='')
    return f"{LC_BASE_URL}?ra={ra_enc}&dec={dec_enc}"

# ---------------- 读表/匹配/输出 ----------------

def read_list_file(list_path: str,
                   base_dir: str = "/mnt/7b21f1e1-eb25-4cd5-bdb5-06d7d82fa253/Temp/force_photmetry/images/00109201113") -> List[str]:
    """
    Read an image list file and return absolute CalMag catalog paths.

    Each non-empty, non-comment line is taken as a filename; a trailing
    ``_sciimg.fits`` is replaced with ``_sciimg_sexcat_CalMag.fits`` and the
    result is joined onto *base_dir*.

    Parameters
    ----------
    list_path : path of the list file (one filename per line; '#' starts a comment).
    base_dir  : directory prepended to every entry. Defaults to the historical
                hard-coded image directory, so existing callers are unaffected.

    Returns
    -------
    List of joined paths, in file order.
    """
    suffix = "_sciimg.fits"
    replacement = "_sciimg_sexcat_CalMag.fits"
    out: List[str] = []
    with open(list_path, "r", encoding="utf-8", errors="ignore") as f:
        for ln in f:
            ln = ln.strip()
            if not ln or ln.startswith("#"):
                continue
            # Replace only the trailing suffix (str.replace would also rewrite
            # an accidental earlier occurrence of the pattern in the name).
            if ln.endswith(suffix):
                ln = ln[:-len(suffix)] + replacement
            out.append(os.path.join(base_dir, ln))
    return out

def _pick_col(colnames: List[str], prefs: List[str]) -> Optional[str]:
    """根据优先级选择列名"""
    for c in prefs:
        if c in colnames:
            return c
    lower = {c.lower(): c for c in colnames}
    for c in prefs:
        if c.lower() in lower:
            return lower[c.lower()]
    return None

def _read_table_drop_multidim(path: str) -> Table:
    """Read HDU 2 of a FITS catalog and drop any multi-dimensional columns."""
    tab = Table.read(path, hdu=2, format="fits")
    flat = [name for name in tab.colnames if len(tab[name].shape) <= 1]
    return tab[flat]

def read_catalog(path: str) -> Table:
    """
    Load a catalog and normalize it to columns ra/dec/mag/magerr/mjd/band.

    magerr falls back to a constant 0.05, mjd to NaN; band comes from a
    FILTER-like column, else a single letter guessed from the filename,
    else "unknown". Raises ValueError when RA/DEC/mag cannot be identified.
    """
    raw = _read_table_drop_multidim(path)
    names = raw.colnames
    ra_col = _pick_col(names, RA_CANDS)
    dec_col = _pick_col(names, DEC_CANDS)
    mag_col = _pick_col(names, MAG_PREFS)
    if ra_col is None or dec_col is None or mag_col is None:
        raise ValueError(f"[{path}] 无法识别 RA/DEC/mag 列，现有列：{names}")
    merr_col = _pick_col(names, MERR_PREFS)
    time_col = _pick_col(names, TIME_CANDS)
    band_col = _pick_col(names, BAND_CANDS)

    norm = Table()
    norm["ra"] = np.array(raw[ra_col], dtype=float)
    norm["dec"] = np.array(raw[dec_col], dtype=float)
    norm["mag"] = np.array(raw[mag_col], dtype=float)
    n = len(norm)
    norm["magerr"] = np.array(raw[merr_col], dtype=float) if merr_col else np.full(n, 0.05)
    norm["mjd"] = np.array(raw[time_col], dtype=float) if time_col else np.full(n, np.nan)
    if band_col:
        norm["band"] = np.array(raw[band_col]).astype(str)
    else:
        # Try to extract a single band letter delimited by '_' or '.' in the filename.
        hit = re.search(r"[_\.]([ugrizyUBVRI])[_\.]", os.path.basename(path))
        norm["band"] = np.full(n, hit.group(1) if hit else "unknown")
    return norm

def xmatch_one_to_one(a: Table, b: Table, radius_arcsec: float) -> Table:
    """
    Cross-match catalog *a* against *b* with astropy.match_coordinates_sky.

    NOTE: despite the name this is nearest-neighbour matching — every row
    of *a* is paired with its closest *b* source within *radius_arcsec*,
    so two *a* rows can share the same *b* row (not strictly one-to-one).

    Returns a table with index columns ``i1`` (row in a) / ``i2`` (row in
    b), ``sep_arcsec``, and the per-catalog columns prefixed ``a_*`` /
    ``b_*``. Returns an empty Table when nothing matches.
    """
    c_a = SkyCoord(a["ra"] * u.deg, a["dec"] * u.deg)
    c_b = SkyCoord(b["ra"] * u.deg, b["dec"] * u.deg)

    # For every source in a: index of its nearest neighbour in b and the separation.
    idx_b, sep2d, _ = match_coordinates_sky(c_a, c_b)
    mask = sep2d <= (radius_arcsec * u.arcsec)
    if np.sum(mask) == 0:
        return Table()

    pairs = Table()
    pairs["i1"] = np.arange(len(a))[mask]
    pairs["i2"] = idx_b[mask].astype(int)
    pairs["sep_arcsec"] = sep2d[mask].arcsec

    # Carry the original row numbers along so they survive the joins below.
    a_with_idx = a.copy(); a_with_idx["a_row"] = np.arange(len(a))
    b_with_idx = b.copy(); b_with_idx["b_row"] = np.arange(len(b))

    # Attach the a-catalog columns, then prefix them with a_ (rename must
    # happen before the second join or the column names would collide).
    m = join(pairs, a_with_idx, keys_left="i1", keys_right="a_row", join_type="left")
    m.rename_columns(["ra","dec","mag","magerr","mjd","band"],
                     ["a_ra","a_dec","a_mag","a_magerr","a_mjd","a_band"])

    # Same for the b-catalog columns with a b_ prefix.
    m = join(m, b_with_idx, keys_left="i2", keys_right="b_row", join_type="left")
    m.rename_columns(["ra","dec","mag","magerr","mjd","band"],
                     ["b_ra","b_dec","b_mag","b_magerr","b_mjd","b_band"])
    return m

def _pick_like(colnames, prefer: str) -> Optional[str]:
    """
    在 colnames 中寻找 prefer 或 prefer_数字 的列名（如 i2 或 i2_1 / i2_2）。
    """
    if prefer in colnames:
        return prefer
    pat = re.compile(rf"^{re.escape(prefer)}(_\d+)?$")
    for c in colnames:
        if pat.fullmatch(c):
            return c
    return None

def split_matched_unmatched(a: Table, b: Table, matched: Table,
                            i1_name: str = "i1", i2_name: str = "i2") -> Tuple[Table, Table]:
    """
    Return the rows of *a* and *b* that do NOT appear in *matched*.

    The original row numbers are written back as ``a_row`` / ``b_row`` so
    downstream merges can refer to them. ``i1_name`` / ``i2_name`` name the
    index columns inside *matched*; astropy joins may have suffixed them
    (e.g. ``i2_1`` / ``i2_2`` in the triple table), which ``_pick_like``
    resolves. Returns two empty tables when the index columns are missing.
    """
    if len(matched) == 0:
        # Nothing matched at all: every row of both catalogs is unmatched.
        a_only = a.copy()
        b_only = b.copy()
        a_only["a_row"] = np.arange(len(a_only), dtype=int)
        b_only["b_row"] = np.arange(len(b_only), dtype=int)
        return a_only, b_only

    i1_col = _pick_like(matched.colnames, i1_name)
    i2_col = _pick_like(matched.colnames, i2_name)
    if i1_col is None or i2_col is None:
        # Without the index columns we cannot tell matched from unmatched.
        print(f"[split_matched_unmatched] 未找到索引列：{i1_name}/{i2_name}，返回空表")
        return Table(), Table()

    a_idx_all = np.arange(len(a), dtype=int)
    b_idx_all = np.arange(len(b), dtype=int)
    # unique() because nearest-neighbour matching may reuse a row of b.
    a_matched = np.unique(np.asarray(matched[i1_col], dtype=int))
    b_matched = np.unique(np.asarray(matched[i2_col], dtype=int))

    a_unmatched_idx = np.setdiff1d(a_idx_all, a_matched)
    b_unmatched_idx = np.setdiff1d(b_idx_all, b_matched)

    a_only = a[a_unmatched_idx].copy()
    b_only = b[b_unmatched_idx].copy()

    # Write the original row numbers back for downstream merging.
    a_only["a_row"] = a_unmatched_idx
    b_only["b_row"] = b_unmatched_idx
    return a_only, b_only

def write_ds9_unmatched(a_only: Table, b_only: Table,
                        out_a: str, out_b: str, r_arcsec: float = 1.5):
    """Write DS9 region files for unmatched sources (output paths chosen by caller)."""
    def _write_regions(tab: Table, color: str, path: str):
        content = [
            "# Region file format: DS9 version 4.1",
            f"global color={color} dashlist=8 3 width=1 font='helvetica 10 normal'",
            "fk5",
        ]
        ra_name = "ra" if "ra" in tab.colnames else _pick_like(tab.colnames, "ra")
        dec_name = "dec" if "dec" in tab.colnames else _pick_like(tab.colnames, "dec")
        if ra_name is None or dec_name is None:
            # No usable coordinate columns: emit just the header so the file exists.
            with open(path, "w", encoding="utf-8") as fh:
                fh.write("\n".join(content) + "\n")
            print(f"未匹配 region ({color}): {os.path.abspath(path)} [无坐标列，空文件]")
            return

        for row in tab:
            try:
                ra_val = float(row[ra_name])
                dec_val = float(row[dec_name])
            except Exception:
                # Skip rows whose coordinates cannot be converted to float.
                continue
            content.append(f'circle({ra_val},{dec_val},{r_arcsec}") # color={color}')
        with open(path, "w", encoding="utf-8") as fh:
            fh.write("\n".join(content) + "\n")
        print(f"未匹配 region ({color}): {os.path.abspath(path)}")

    _write_regions(a_only, "cyan",  out_a)
    _write_regions(b_only, "yellow", out_b)

def write_ds9_regions(all_tab: "Table", cand_tab: "Table",
                      out_all: str, out_cand: str,
                      r_all_arcsec: float = 1.5, r_cand_arcsec: float = 2.5):
    """Write DS9 region files: all matched sources (green) and candidates (red)."""

    def _field(row, name):
        # Missing columns fall back to NaN so the text label still formats.
        return float(row[name]) if name in row.colnames else np.nan

    all_lines = [
        "# Region file format: DS9 version 4.1",
        "global color=green dashlist=8 3 width=1 font='helvetica 10 normal'",
        "fk5",
    ]
    for row in all_tab:
        try:
            ra_v = float(row["ra"])
            dec_v = float(row["dec"])
            s_v = _field(row, "S")
            dm_v = _field(row, "dmag")
            sep_v = _field(row, "sep_arcsec")
        except Exception:
            continue
        all_lines.append(
            f'circle({ra_v},{dec_v},{r_all_arcsec}") '
            f'# color=green width=1 text={{S={s_v:.1f} Δm={dm_v:+.3f} sep={sep_v:.2f}"}}'
        )
    with open(out_all, "w", encoding="utf-8") as fh:
        fh.write("\n".join(all_lines) + "\n")

    cand_lines = [
        "# Region file format: DS9 version 4.1",
        "global color=red dashlist=8 3 width=2 font='helvetica 10 bold'",
        "fk5",
    ]
    for row in cand_tab:
        try:
            ra_v = float(row["ra"])
            dec_v = float(row["dec"])
            s_v = float(row["S"])
            dm_v = float(row["dmag"])
        except Exception:
            continue
        cand_lines.append(
            f'circle({ra_v},{dec_v},{r_cand_arcsec}") '
            f'# color=red width=2 text={{VAR S={s_v:.1f} Δm={dm_v:+.3f}}}'
        )
    with open(out_cand, "w", encoding="utf-8") as fh:
        fh.write("\n".join(cand_lines) + "\n")

    print(f"🟢 所有匹配 region: {os.path.abspath(out_all)}")
    print(f"🔴 候选变量 region: {os.path.abspath(out_cand)}")

def process_three_catalogs(catA: str, catB: str, catC: str, radius_arcsec: float,
                         sig_sys: float, s_thresh: float, min_amp: float,
                         same_src_sep: float, same_band: bool):
    """
    Triple-match three catalogs (A as the reference) and flag variability
    candidates.

    Returns ``(summary, a, b, c, triple_matched)`` where *summary* has one
    row per triple-matched source with per-pair Δmag and significance
    (S_AB / S_AC / S_BC), an ``is_candidate`` flag and a light-curve URL
    for candidates. Also writes the triple-match table to lightcurves/.

    Notes
    -----
    * ``sig_sys`` is added in quadrature once per catalog of each pair,
      hence the ``2*sig_sys**2`` term in the noise model.
    * ``same_src_sep`` and ``same_band`` are accepted but never referenced
      in this body — presumably kept for interface compatibility; TODO confirm.
    """
    a = read_catalog(catA)
    b = read_catalog(catB)
    c = read_catalog(catC)

    print(f"📊 星表 A 源数: {len(a)}")
    print(f"📊 星表 B 源数: {len(b)}")
    print(f"📊 星表 C 源数: {len(c)}")

    # Match A against B.
    matched_AB = xmatch_one_to_one(a, b, radius_arcsec)
    print(f"🔍 A-B 初步匹配数 (<= {radius_arcsec}\"): {len(matched_AB)}")

    # Match A against C.
    matched_AC = xmatch_one_to_one(a, c, radius_arcsec)
    print(f"🔍 A-C 初步匹配数 (<= {radius_arcsec}\"): {len(matched_AC)}")

    if len(matched_AB) == 0 or len(matched_AC) == 0:
        print("⚠️ 无足够匹配源进行三重匹配")
        return Table(), a, b, c, Table()

    # Avoid column-name clashes before joining the two pair tables:
    # 1. In matched_AC rename the 'a_' prefix to 'a2_' (temporary) and 'b_' to 'c_'.
    if len(matched_AC) > 0:
        for col in list(matched_AC.colnames):
            if col.startswith('a_'):
                new_col = col.replace('a_', 'a2_', 1)
                matched_AC.rename_column(col, new_col)
            elif col.startswith('b_'):
                new_col = col.replace('b_', 'c_', 1)
                matched_AC.rename_column(col, new_col)
        # Rename the separation column so the AB/AC distances stay distinguishable.
        if 'sep_arcsec' in matched_AC.colnames:
            matched_AC.rename_column('sep_arcsec', 'sep_AC')

    # 2. Likewise rename the separation column of matched_AB.
    if 'sep_arcsec' in matched_AB.colnames:
        matched_AB.rename_column('sep_arcsec', 'sep_AB')

    print(f"✅ A-B 有效匹配数: {len(matched_AB)}")
    print(f"✅ A-C 有效匹配数: {len(matched_AC)}")

    # 3. Inner-join on i1 (the A row index): keeps A sources matched in both B and C.
    #    Remaining duplicated columns (i2, a_row, b_row) get _1/_2 suffixes from astropy.
    triple_matched = join(matched_AB, matched_AC, keys="i1", join_type='inner')
    print(f"🌟 三重匹配源数 (A同时匹配B和C): {len(triple_matched)}")

    if len(triple_matched) == 0:
        return Table(), a, b, c, triple_matched

    # 4. Drop the duplicated A-catalog columns (a2_* repeats a_*).
    cols_to_remove = [col for col in triple_matched.colnames if col.startswith('a2_')]
    if cols_to_remove:
        triple_matched.remove_columns(cols_to_remove)

    # 5. Pairwise difference statistics: Δmag and its significance S = |Δm|/σ,
    #    with σ the quadrature sum of both catalog errors plus sig_sys per catalog.
    dmag_AB = triple_matched["b_mag"] - triple_matched["a_mag"]
    sig_AB = np.sqrt(triple_matched["a_magerr"]**2 + triple_matched["b_magerr"]**2 + 2*sig_sys**2)
    S_AB = np.abs(dmag_AB) / np.where(sig_AB > 0, sig_AB, np.nan)

    dmag_AC = triple_matched["c_mag"] - triple_matched["a_mag"]
    sig_AC = np.sqrt(triple_matched["a_magerr"]**2 + triple_matched["c_magerr"]**2 + 2*sig_sys**2)
    S_AC = np.abs(dmag_AC) / np.where(sig_AC > 0, sig_AC, np.nan)

    dmag_BC = triple_matched["c_mag"] - triple_matched["b_mag"]
    sig_BC = np.sqrt(triple_matched["b_magerr"]**2 + triple_matched["c_magerr"]**2 + 2*sig_sys**2)
    S_BC = np.abs(dmag_BC) / np.where(sig_BC > 0, sig_BC, np.nan)

    # Mark each pair comparison that passes both thresholds.
    cond_AB = (S_AB >= s_thresh) & (np.abs(dmag_AB) >= min_amp)
    cond_AC = (S_AC >= s_thresh) & (np.abs(dmag_AC) >= min_amp)
    cond_BC = (S_BC >= s_thresh) & (np.abs(dmag_BC) >= min_amp)

    # Candidate condition: at least two of the three comparisons pass.
    is_candidate = (cond_AB.astype(int) + cond_AC.astype(int) + cond_BC.astype(int)) >= 2
    cand_num = int(np.sum(is_candidate))

    # Build the output table (coordinates taken from catalog A).
    out = Table()
    out["ra"] = triple_matched["a_ra"]
    out["dec"] = triple_matched["a_dec"]
    out["sep_AB"] = triple_matched["sep_AB"]
    out["sep_AC"] = triple_matched["sep_AC"]
    out["mag_A"] = triple_matched["a_mag"]
    out["mag_B"] = triple_matched["b_mag"]
    out["mag_C"] = triple_matched["c_mag"]
    out["dmag_AB"] = dmag_AB
    out["dmag_AC"] = dmag_AC
    out["dmag_BC"] = dmag_BC
    out["S_AB"] = S_AB
    out["S_AC"] = S_AC
    out["S_BC"] = S_BC

    # Maximum significance and amplitude across the three pairs, for display.
    out["max_S"] = np.max([S_AB, S_AC, S_BC], axis=0)
    out["max_amp"] = np.max([np.abs(dmag_AB), np.abs(dmag_AC), np.abs(dmag_BC)], axis=0)

    # S and dmag are the values used by the DS9 region labels (the maxima).
    out["S"] = out["max_S"]
    out["dmag"] = out["max_amp"]  # shown as a positive amplitude

    out["is_candidate"] = is_candidate

    print(f"🌟 变量候选数 (至少两对满足 S>={s_thresh}, |Δm|>={min_amp}): {cand_num}")

    # Build light-curve URLs for candidates (empty string for non-candidates).
    urls = []
    for ra_deg, dec_deg, flag in zip(out["ra"], out["dec"], out["is_candidate"]):
        urls.append(_build_lightcurve_url(ra_deg, dec_deg) if bool(flag) else "")
    out["url"] = np.array(urls, dtype="U256")

    # Persist the triple-match table (CSV + FITS) under lightcurves/.
    triple_fits = _p("triple_matched_catalog.fits")
    triple_csv = _p("triple_matched_catalog.csv")
    triple_matched.write(triple_csv, format="ascii.csv", overwrite=True)
    triple_to_fits = _coerce_object_str_columns(triple_matched, width=256)
    triple_to_fits.write(triple_fits, format="fits", overwrite=True)
    print(f"💾 三重匹配星表 CSV:  {os.path.abspath(triple_csv)}")
    print(f"💾 三重匹配星表 FITS: {os.path.abspath(triple_fits)}")

    return out, a, b, c, triple_matched

def main():
    """
    Command-line entry point: triple-match the first three catalogs named in
    --list, then write candidate tables, a URL list and DS9 region files
    into the lightcurves/ output directory.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--list", default="/mnt/7b21f1e1-eb25-4cd5-bdb5-06d7d82fa253/Temp/force_photmetry/images/00109201113/images_v.list")
    ap.add_argument("--radius", type=float, default=2.0)
    ap.add_argument("--same-src-sep", type=float, default=1.0)
    ap.add_argument("--same-band", action="store_true")
    ap.add_argument("--sig-sys", type=float, default=SIG_SYS_DEFAULT)
    ap.add_argument("--s-thresh", type=float, default=5.0)
    ap.add_argument("--min-amp", type=float, default=0.3)
    ap.add_argument("--out", default="var_candidates.csv")
    args = ap.parse_args()

    _ensure_outdir()

    paths = read_list_file(args.list)
    if len(paths) < 3:
        raise RuntimeError("list 文件必须至少包含三个 fits 路径（当前只找到 {} 个)".format(len(paths)))

    # Only the first three entries are used, even when the list is longer.
    catA, catB, catC = paths[0], paths[1], paths[2]
    print(f"读取三重匹配:\n  A: {catA}\n  B: {catB}\n  C: {catC}")

    res, a, b, c, triple_matched = process_three_catalogs(
        catA, catB, catC,
        radius_arcsec=args.radius,
        sig_sys=args.sig_sys,
        s_thresh=args.s_thresh,
        min_amp=args.min_amp,
        same_src_sep=args.same_src_sep,
        same_band=args.same_band
    )

    if len(res) == 0:
        print("⚠️ 无结果")
        return

    # Write candidates only (CSV + FITS), plus the URL list below.
    cands = res[res["is_candidate"] == True]
    out_csv  = _p(args.out)
    out_fits = _p(os.path.splitext(args.out)[0] + ".fits")

    cands.write(out_csv,  format="ascii.csv", overwrite=True)
    cands_to_fits = _coerce_object_str_columns(cands, width=256)
    cands_to_fits.write(out_fits, format="fits", overwrite=True)

    print(f"✅ 候选 CSV:  {os.path.abspath(out_csv)}")
    print(f"✅ 候选 FITS: {os.path.abspath(out_fits)}")
    if len(cands) > 0:
        print("示例候选（前5条）：")
        for i in range(min(5, len(cands))):
            print(f"  RA={cands['ra'][i]:.6f}  DEC={cands['dec'][i]:+.6f}  "
                  f"max_S={cands['max_S'][i]:.1f}  max_amp={cands['max_amp'][i]:.3f}  URL={cands['url'][i]}")

    # Also export a plain-text URL list (candidates only).
    url_list_path = _p("var_candidate_urls.txt")
    with open(url_list_path, "w", encoding="utf-8") as f:
        for url in cands["url"]:
            if isinstance(url, str) and url:
                f.write(url + "\n")
    print(f"🔗 候选 URL 清单: {os.path.abspath(url_list_path)}")

    # DS9 regions -> lightcurves/
    write_ds9_regions(
        all_tab=res,
        cand_tab=cands,
        out_all=_p("var_all_matches.reg"),
        out_cand=_p("var_candidates.reg")
    )

    # Unmatched sources (A vs B, A vs C). NOTE: derived from the TRIPLE match
    # table, so a source matched pairwise but missing from one catalog counts
    # as unmatched here. astropy's join suffixed the duplicated i2 column as
    # i2_1 (from A-B) and i2_2 (from A-C).
    a_only_AB, b_only = split_matched_unmatched(a, b, triple_matched, i1_name="i1", i2_name="i2_1")
    a_only_AC, c_only = split_matched_unmatched(a, c, triple_matched, i1_name="i1", i2_name="i2_2")

    # Merge A's unmatched rows from the two comparisons. The outer join keeps
    # the UNION of both sets — presumably intentional; TODO confirm whether the
    # intersection (unmatched in both B and C) was meant instead.
    if len(a_only_AB) == 0 and len(a_only_AC) == 0:
        a_only = Table()
    elif len(a_only_AB) == 0:
        a_only = a_only_AC
    elif len(a_only_AC) == 0:
        a_only = a_only_AB
    else:
        a_only = join(a_only_AB, a_only_AC, keys="a_row", join_type='outer')

        # Unify the ra/dec fields so no ra_1/ra_2 leftovers remain after the join.
        def _merge_cols(tab: Table, c1: str, c2: str, outc: str):
            """Coalesce tab[c1] and tab[c2] (c2 fills c1's non-finite slots) into tab[outc]."""
            if c1 in tab.colnames and c2 in tab.colnames:
                arr = np.array(tab[c1], dtype=float)
                mask = ~np.isfinite(arr)
                arr2 = np.array(tab[c2], dtype=float)
                arr[mask] = arr2[mask]
                tab.remove_columns([c1, c2])
                tab[outc] = arr
            elif c1 in tab.colnames and outc not in tab.colnames:
                tab.rename_column(c1, outc)
            elif c2 in tab.colnames and outc not in tab.colnames:
                tab.rename_column(c2, outc)

        _merge_cols(a_only, "ra_1", "ra_2", "ra")
        _merge_cols(a_only, "dec_1", "dec_2", "dec")
        # If ra/dec still do not exist as single columns, leave the table as-is.

    # Write the unmatched-source region files (a_only is reused for both calls).
    write_ds9_unmatched(
        a_only, b_only,
        out_a=_p("unmatched_A_vs_B.reg"),
        out_b=_p("unmatched_B.reg")
    )
    write_ds9_unmatched(
        a_only, c_only,
        out_a=_p("unmatched_A_vs_C.reg"),
        out_b=_p("unmatched_C.reg")
    )

if __name__ == "__main__":
    main()
