#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
匹配三个星表并检测变量候选 + 3点序列聚类与分簇画图 + 孤立森林异常检测
1) 从 images_u.list 中读取文件路径，替换为 CalMag 星表
2) 用 astropy.match_coordinates_sky 做三重匹配（A-B, A-C）
3) 检测显著性变量候选（至少两对比较满足条件）
4) 输出统计信息、保存匹配结果与 DS9 region 文件
5) 为所有候选生成 light curve URL（对 + 等字符进行 URL 编码）
6) 将三重匹配得到的 3 点序列做 KMeans 聚类，为每一类画归一化叠加图
7) 孤立森林 IsolationForest 做异常检测，输出异常分、直方图、TopN叠加图与 region
8) 所有 .reg、.fits、.csv、.txt、.png 统一保存到 lightcurves/
"""

import os, re, argparse
import numpy as np
from typing import List, Optional, Tuple
from urllib.parse import quote

from astropy.table import Table, join
from astropy.coordinates import SkyCoord, match_coordinates_sky
import astropy.units as u

# Optional dependency: clustering & plotting (try importing the clustering deps up front).
_SKLEARN_OK = True
try:
    from sklearn.preprocessing import StandardScaler
    from sklearn.cluster import KMeans
except Exception:
    _SKLEARN_OK = False

import matplotlib
matplotlib.use("Agg")  # headless backend so figures render without a display
import matplotlib.pyplot as plt

# ---- Candidate column names (first match wins; see _pick_col) ----
RA_CANDS   = ["ALPHA_J2000", "RA", "ra"]
DEC_CANDS  = ["DELTA_J2000", "DEC", "dec"]
MAG_PREFS  = ["CALMAG_22", "CALMAG_12", "MAG_AUTO", "mag"]
MERR_PREFS = ["MAGERR_AUTO_S", "MAGERR_AUTO", "MAGERR", "magerr"]
TIME_CANDS = ["MJD", "HJD", "BJD", "JD", "MJD-OBS", "obs_mjd"]
BAND_CANDS = ["FILTER", "filter", "BAND", "band"]

SIG_SYS_DEFAULT = 0.02  # default systematic mag error, added in quadrature
LC_BASE_URL = "http://192.168.16.70:8082/light_curve_V20250303"
OUT_DIR = "lightcurves"  # every product (.reg/.fits/.csv/.txt/.png) lands here
PLOT_DIR = os.path.join(OUT_DIR, "cluster_plots")

# ---------------- Small utilities ----------------

def _ensure_outdir() -> None:
    """Create the output and cluster-plot directories if missing."""
    for directory in (OUT_DIR, PLOT_DIR):
        os.makedirs(directory, exist_ok=True)

def _p(*names: str) -> str:
    """Return a path joining *names* onto the output directory."""
    path = os.path.join(OUT_DIR, *names)
    return path

def _coerce_object_str_columns(tab: Table, width: int = 256) -> Table:
    """Return a copy of *tab* with dtype=object columns cast to fixed-width
    Unicode (U<width>) so the table becomes FITS-writable; an empty table
    is returned unchanged."""
    if not len(tab):
        return tab
    result = tab.copy()
    object_cols = [c for c in result.colnames if result[c].dtype.kind == 'O']
    for col in object_cols:
        result[col] = np.array(result[col], dtype=f"U{width}")
    return result

def _to_ascii_safe(s: str) -> str:
    """将字符串中的非 ASCII 符号替换为近似 ASCII；仍无法编码的字符将被丢弃。"""
    if s is None:
        return ""
    s = str(s)
    repl = {
        "∧": "^",   "∨": "V",
        "↑": "up",  "↓": "down",
        "Δ": "d",   "±": "+/-",
        "—": "-",   "–": "-",
        "·": ".",   "：": ":", "，": ",",
        "（": "(",  "）": ")",
        "“": '"',   "”": '"', "‘": "'", "’": "'",
    }
    s = s.translate(str.maketrans(repl))
    try:
        s.encode("ascii")
        return s
    except UnicodeEncodeError:
        return s.encode("ascii", "ignore").decode("ascii")

def _fits_safe_table(tab: Table, width: int = 256) -> Table:
    """Copy *tab* and normalize every string column to ASCII-safe
    fixed-width Unicode so the result can be written out as FITS."""
    safe = _coerce_object_str_columns(tab, width=width)
    for col in safe.colnames:
        if safe[col].dtype.kind not in ("U", "O"):
            continue
        safe[col] = np.array([_to_ascii_safe(v) for v in safe[col]], dtype=f"U{width}")
    return safe

# ---------------- RA/DEC & URL ----------------

def _format_ra_hms(ra_deg: float, sec_prec: int = 4) -> str:
    ra_deg = ra_deg % 360.0
    total_seconds = (ra_deg / 15.0) * 3600.0
    h = int(total_seconds // 3600)
    m = int((total_seconds - h * 3600) // 60)
    s = total_seconds - h * 3600 - m * 60
    s_rounded = round(s, sec_prec)
    if s_rounded >= 60.0:
        s_rounded = 0.0
        m += 1
        if m >= 60:
            m = 0; h += 1
            if h >= 24:
                h = 0
    return f"{h}:{m:02d}:{s_rounded:0{2 + 1 + sec_prec}.{sec_prec}f}"

def _format_dec_dms(dec_deg: float, sec_prec: int = 3) -> str:
    sign = '+' if dec_deg >= 0 else '-'
    x = abs(dec_deg)
    total_seconds = x * 3600.0
    d = int(total_seconds // 3600)
    m = int((total_seconds - d * 3600) // 60)
    s = total_seconds - d * 3600 - m * 60
    s_rounded = round(s, sec_prec)
    if s_rounded >= 60.0:
        s_rounded = 0.0
        m += 1
        if m >= 60:
            m = 0; d += 1
    return f"{sign}{d:02d}:{m:02d}:{s_rounded:0{2 + 1 + sec_prec}.{sec_prec}f}"

def _build_lightcurve_url(ra_deg: float, dec_deg: float) -> str:
    """Build the light-curve service URL for (ra_deg, dec_deg).

    The sexagesimal coordinate strings are percent-encoded with no safe
    characters, so ':' and '+' survive transport in the query string.
    """
    ra_part = quote(_format_ra_hms(float(ra_deg), sec_prec=4), safe='')
    dec_part = quote(_format_dec_dms(float(dec_deg), sec_prec=3), safe='')
    return f"{LC_BASE_URL}?ra={ra_part}&dec={dec_part}"

# ---------------- Table reading / matching / output ----------------

def read_list_file(list_path: str,
                   base_dir: str = "F:\\tools_mephisto\\images\\1278305") -> List[str]:
    """Read a .list file and return the catalog paths it names.

    Blank lines and '#' comments are skipped.  Entries ending in
    '_sciimg.fits' are rewritten to the corresponding
    '_sciimg_sexcat_CalMag.fits' catalog name.  Each entry is joined onto
    *base_dir* — a new keyword whose default is the previously hard-coded
    directory, so existing callers are unaffected while other data roots
    become usable.

    Args:
        list_path: path to the list file (read UTF-8, decode errors ignored).
        base_dir:  directory prepended to every listed file name.

    Returns:
        List of full catalog paths, in file order.
    """
    paths: List[str] = []
    with open(list_path, "r", encoding="utf-8", errors="ignore") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            if line.endswith("_sciimg.fits"):
                line = line.replace("_sciimg.fits", "_sciimg_sexcat_CalMag.fits")
            paths.append(os.path.join(base_dir, line))
    return paths

def _pick_col(colnames: List[str], prefs: List[str]) -> Optional[str]:
    for c in prefs:
        if c in colnames:
            return c
    lower = {c.lower(): c for c in colnames}
    for c in prefs:
        if c.lower() in lower:
            return lower[c.lower()]
    return None

def _read_table_drop_multidim(path: str) -> Table:
    """Read HDU 2 of a FITS catalog, keeping only scalar (non-vector) columns."""
    tab = Table.read(path, hdu=2, format="fits")
    scalar_cols = [c for c in tab.colnames if len(tab[c].shape) <= 1]
    return tab[scalar_cols]

def read_catalog(path: str) -> Table:
    """Load one catalog into a normalized table with columns
    ra/dec/mag/magerr/mjd/band.

    Missing magnitude errors default to 0.05 mag and missing times to NaN.
    Without a band column, the band is guessed from a single-letter filter
    code between '_' or '.' in the file name, else 'unknown'.

    Raises:
        ValueError: when RA/DEC/mag columns cannot be identified.
    """
    raw = _read_table_drop_multidim(path)
    cols = raw.colnames
    ra_col = _pick_col(cols, RA_CANDS)
    dec_col = _pick_col(cols, DEC_CANDS)
    mag_col = _pick_col(cols, MAG_PREFS)
    err_col = _pick_col(cols, MERR_PREFS)
    if ra_col is None or dec_col is None or mag_col is None:
        raise ValueError(f"[{path}] 无法识别 RA/DEC/mag 列，现有列：{cols}")
    time_col = _pick_col(cols, TIME_CANDS)
    band_col = _pick_col(cols, BAND_CANDS)

    norm = Table()
    norm["ra"] = np.array(raw[ra_col], dtype=float)
    norm["dec"] = np.array(raw[dec_col], dtype=float)
    norm["mag"] = np.array(raw[mag_col], dtype=float)
    n = len(norm)
    norm["magerr"] = np.array(raw[err_col], dtype=float) if err_col else np.full(n, 0.05)
    norm["mjd"] = np.array(raw[time_col], dtype=float) if time_col else np.full(n, np.nan)
    if band_col:
        norm["band"] = np.array(raw[band_col]).astype(str)
    else:
        hit = re.search(r"[_\.]([ugrizyUBVRI])[_\.]", os.path.basename(path))
        norm["band"] = np.full(n, hit.group(1) if hit else "unknown")
    return norm

def xmatch_one_to_one(a: Table, b: Table, radius_arcsec: float) -> Table:
    """Nearest-neighbour cross-match of *a* against *b*.

    Each row of *a* is matched to its closest source in *b*; pairs wider
    than *radius_arcsec* are dropped.  Returns a table with row-index
    columns i1/i2, the separation ('sep_arcsec'), and a_*/b_* prefixed
    copies of ra/dec/mag/magerr/mjd/band; empty table when nothing
    matches.  NOTE(review): despite the name, this is nearest-neighbour
    per a-row, so two a-rows may share one b-row.
    """
    c_a = SkyCoord(a["ra"] * u.deg, a["dec"] * u.deg)
    c_b = SkyCoord(b["ra"] * u.deg, b["dec"] * u.deg)
    idx_b, sep2d, _ = match_coordinates_sky(c_a, c_b)
    mask = sep2d <= (radius_arcsec * u.arcsec)
    if np.sum(mask) == 0:
        return Table()

    # Pair table: a-row index, matched b-row index, angular separation.
    pairs = Table()
    pairs["i1"] = np.arange(len(a))[mask]
    pairs["i2"] = idx_b[mask].astype(int)
    pairs["sep_arcsec"] = sep2d[mask].arcsec

    # Attach explicit row numbers so the joins below can key on them.
    a_with_idx = a.copy(); a_with_idx["a_row"] = np.arange(len(a))
    b_with_idx = b.copy(); b_with_idx["b_row"] = np.arange(len(b))

    # Left-join A's columns, rename, then left-join B's and rename; the
    # rename must happen between the joins to avoid column collisions.
    m = join(pairs, a_with_idx, keys_left="i1", keys_right="a_row", join_type="left")
    m.rename_columns(["ra","dec","mag","magerr","mjd","band"],
                     ["a_ra","a_dec","a_mag","a_magerr","a_mjd","a_band"])
    m = join(m, b_with_idx, keys_left="i2", keys_right="b_row", join_type="left")
    m.rename_columns(["ra","dec","mag","magerr","mjd","band"],
                     ["b_ra","b_dec","b_mag","b_magerr","b_mjd","b_band"])
    return m

def _pick_like(colnames, prefer: str) -> Optional[str]:
    if prefer in colnames:
        return prefer
    pat = re.compile(rf"^{re.escape(prefer)}(_\d+)?$")
    for c in colnames:
        if pat.fullmatch(c):
            return c
    return None

def split_matched_unmatched(a: Table, b: Table, matched: Table,
                            i1_name: str = "i1", i2_name: str = "i2") -> Tuple[Table, Table]:
    """Return (a_only, b_only): rows of *a* and *b* absent from *matched*.

    Index columns are located via _pick_like so join-suffixed names
    ('i1_1', ...) also resolve.  An empty *matched* yields full copies of
    both tables; unresolvable index columns yield two empty tables (with
    a warning print).  Row-number columns 'a_row'/'b_row' are attached.
    """
    if len(matched) == 0:
        a_only, b_only = a.copy(), b.copy()
        a_only["a_row"] = np.arange(len(a_only), dtype=int)
        b_only["b_row"] = np.arange(len(b_only), dtype=int)
        return a_only, b_only

    i1_col = _pick_like(matched.colnames, i1_name)
    i2_col = _pick_like(matched.colnames, i2_name)
    if i1_col is None or i2_col is None:
        print(f"[split_matched_unmatched] 未找到索引列：{i1_name}/{i2_name}，返回空表")
        return Table(), Table()

    # Complement of the matched index sets.
    keep_a = np.setdiff1d(np.arange(len(a), dtype=int),
                          np.unique(np.asarray(matched[i1_col], dtype=int)))
    keep_b = np.setdiff1d(np.arange(len(b), dtype=int),
                          np.unique(np.asarray(matched[i2_col], dtype=int)))

    a_only = a[keep_a].copy()
    b_only = b[keep_b].copy()
    a_only["a_row"] = keep_a
    b_only["b_row"] = keep_b
    return a_only, b_only

def write_ds9_unmatched(a_only: Table, b_only: Table,
                        out_a: str, out_b: str, r_arcsec: float = 1.5):
    """Write DS9 region files (fk5 circles) for unmatched sources.

    *a_only* goes to *out_a* in cyan and *b_only* to *out_b* in yellow,
    each circle of radius *r_arcsec* arcsec.  Tables without recognizable
    ra/dec columns still produce a valid (header-only) region file.
    """
    def _dump(tab: Table, color: str, out: str):
        # DS9 region header: format line, global styling, fk5 frame.
        lines = [
            "# Region file format: DS9 version 4.1",
            f"global color={color} dashlist=8 3 width=1 font='helvetica 10 normal'",
            "fk5",
        ]
        # Fall back to join-suffixed names (ra_1, ...) via _pick_like.
        ra_col = "ra" if "ra" in tab.colnames else _pick_like(tab.colnames, "ra")
        dec_col = "dec" if "dec" in tab.colnames else _pick_like(tab.colnames, "dec")
        if ra_col is None or dec_col is None:
            with open(out, "w", encoding="utf-8") as f:
                f.write("\n".join(lines) + "\n")
            print(f"未匹配 region ({color}): {os.path.abspath(out)} [无坐标列，空文件]")
            return
        for row in tab:
            try:
                ra = float(row[ra_col]); dec = float(row[dec_col])
                lines.append(f'circle({ra},{dec},{r_arcsec}") # color={color}')
            except Exception:
                # Skip rows whose coordinates will not coerce to float
                # (e.g. masked values coming out of an outer join).
                continue
        with open(out, "w", encoding="utf-8") as f:
            f.write("\n".join(lines) + "\n")
        print(f"未匹配 region ({color}): {os.path.abspath(out)}")
    _dump(a_only, "cyan",  out_a)
    _dump(b_only, "yellow", out_b)

def write_ds9_regions(all_tab: Table, cand_tab: Table,
                      out_all: str, out_cand: str,
                      r_all_arcsec: float = 1.5, r_cand_arcsec: float = 2.5):
    """Write two DS9 region files.

    *all_tab* is dumped to *out_all* as thin green circles annotated with
    S / dmag / separation (NaN when those columns are missing), and
    *cand_tab* to *out_cand* as bolder red candidate circles.  Rows whose
    values fail float conversion are skipped silently.
    """
    header = [
        "# Region file format: DS9 version 4.1",
        "global color=green dashlist=8 3 width=1 font='helvetica 10 normal'",
        "fk5",
    ]
    lines = header.copy()
    for row in all_tab:
        try:
            ra   = float(row["ra"]); dec = float(row["dec"])
            # Optional annotation columns; NaN keeps the format valid.
            S    = float(row["S"]) if "S" in row.colnames else np.nan
            dmag = float(row["dmag"]) if "dmag" in row.colnames else np.nan
            sep  = float(row["sep_arcsec"]) if "sep_arcsec" in row.colnames else np.nan
            lines.append(
                f'circle({ra},{dec},{r_all_arcsec}") '
                f'# color=green width=1 text={{S={S:.1f} dmag={dmag:+.3f} sep={sep:.2f}"}}'
            )
        except Exception:
            continue
    with open(out_all, "w", encoding="utf-8") as f:
        f.write("\n".join(lines) + "\n")

    # Candidate file uses its own bolder header.
    lines = [
        "# Region file format: DS9 version 4.1",
        "global color=red dashlist=8 3 width=2 font='helvetica 10 bold'",
        "fk5",
    ]
    for row in cand_tab:
        try:
            ra = float(row["ra"]); dec = float(row["dec"])
            S  = float(row["S"]);  dmag = float(row["dmag"])
            lines.append(
                f'circle({ra},{dec},{r_cand_arcsec}") '
                f'# color=red width=2 text={{VAR S={S:.1f} dmag={dmag:+.3f}}}'
            )
        except Exception:
            continue
    with open(out_cand, "w", encoding="utf-8") as f:
        f.write("\n".join(lines) + "\n")
    print(f"🟢 所有匹配 region: {os.path.abspath(out_all)}")
    print(f"🔴 候选变量 region: {os.path.abspath(out_cand)}")

# ---------------- Clustering: features, labeling, plotting ----------------

def _row_sort_by_time(m, e, t):
    """对单条 3 点按时间升序排序；若 t 含 NaN/重复，按 A,B,C 原顺序并用 t=[0,1,2] 退化。"""
    m = np.asarray(m, float); e = np.asarray(e, float); t = np.asarray(t, float)
    if np.any(~np.isfinite(t)) or len(set(np.round(t, 12))) < 3:
        idx = np.array([0,1,2])
        t_sorted = np.array([0.0, 1.0, 2.0])
        return m[idx], e[idx], t_sorted
    idx = np.argsort(t)
    return m[idx], e[idx], t[idx]

def _build_features_from_triple(triple: Table, s_sys: float):
    """
    Build the feature matrix and plotting series from the triple-match table.
    Returns:
      X        : (N,7) feature matrix [s1,s2,curv,curv_S,rt,amp,Smax]
      series_T : (N,3) per-row time, normalized to [0..1]
      series_M : (N,3) per-row mag with its own mean subtracted
    """
    N = len(triple)
    X = np.zeros((N, 7), dtype=float)
    Tn = np.zeros((N, 3), dtype=float)
    Mn = np.zeros((N, 3), dtype=float)

    a_m = np.array(triple["a_mag"], float)
    b_m = np.array(triple["b_mag"], float)
    c_m = np.array(triple["c_mag"], float)
    a_e = np.array(triple["a_magerr"], float)
    b_e = np.array(triple["b_magerr"], float)
    c_e = np.array(triple["c_magerr"], float)
    a_t = np.array(triple["a_mjd"], float)
    b_t = np.array(triple["b_mjd"], float)
    c_t = np.array(triple["c_mjd"], float)

    eps = 1e-12  # guards the significance denominators against zero
    for i in range(N):
        m_raw = np.array([a_m[i], b_m[i], c_m[i]], float)
        e_raw = np.array([a_e[i], b_e[i], c_e[i]], float)
        t_raw = np.array([a_t[i], b_t[i], c_t[i]], float)

        # Time-order the epochs (degrades to [0,1,2] for bad/duplicate times).
        m, e, t = _row_sort_by_time(m_raw, e_raw, t_raw)

        # Clamp intervals away from zero so the slopes stay finite.
        dt12 = max(t[1]-t[0], 1e-6)
        dt23 = max(t[2]-t[1], 1e-6)
        dt13 = max(t[2]-t[0], 1e-6)
        rt   = dt12 / dt13  # fraction of the baseline before the middle epoch

        d12 = m[1]-m[0]; d23 = m[2]-m[1]
        s1  = d12 / dt12  # slope of the first segment (mag per time unit)
        s2  = d23 / dt23  # slope of the second segment

        # Curvature m0 - 2*m1 + m2; its 1,-2,1 weights propagate to the
        # variance e0^2 + 4*e1^2 + e2^2, plus (1+4+1)*s_sys^2 systematic.
        curv = m[0] - 2*m[1] + m[2]

        sig_c = np.sqrt(e[0]**2 + 4*e[1]**2 + e[2]**2 + 6*(s_sys**2))
        curv_S = curv / max(sig_c, 1e-6)

        amp = np.max(m) - np.min(m)
        # Per-segment change significance; Smax is the larger of the two.
        S12 = abs(d12) / np.sqrt(e[0]**2 + e[1]**2 + 2*s_sys**2 + eps)
        S23 = abs(d23) / np.sqrt(e[1]**2 + e[2]**2 + 2*s_sys**2 + eps)
        Smax = max(S12, S23)

        X[i,:] = [s1, s2, curv, curv_S, rt, amp, Smax]

        # Normalized series for plotting: time to [0,1], mag demeaned.
        t0, t1 = t[0], t[2]
        if t1 - t0 < 1e-6:
            tn = np.array([0.0, 0.5, 1.0])
        else:
            tn = (t - t0) / (t1 - t0)
        mn = m - np.mean(m)
        Tn[i,:] = tn
        Mn[i,:] = mn

    return X, Tn, Mn

def _label_centers(centers_raw):
    """根据中心的 s1/s2/curv 给出语义标签。"""
    labels = []
    for s1, s2, curv, curv_S, rt, amp, Smax in centers_raw:
        mono_up   = (s1 < 0) and (s2 < 0)   # mag 下降 => 变亮
        mono_down = (s1 > 0) and (s2 > 0)   # mag 上升 => 变暗
        mid_bright = (curv > 0)             # m2 更亮（mag 更小）
        mid_faint  = (curv < 0)

        if mono_up:
            lab = "Monotonic brightening (mag down)"
        elif mono_down:
            lab = "Monotonic fading (mag up)"
        elif mid_bright:
            lab = "Middle brighter (V in mag)"
        elif mid_faint:
            lab = "Middle fainter (^ in mag)"
        else:
            lab = "Nearly constant / noisy"
        labels.append(lab)
    return labels

def _plot_cluster_overlay(tn, mn, idxs, title, outfile):
    """Overlay the normalized 3-point series selected by *idxs* in one PNG.

    Each member series is drawn faintly, with the member-wise mean curve
    drawn on top in a thick line.  An empty *idxs* still produces a
    'No members' placeholder image, so every cluster gets a file.
    """
    os.makedirs(os.path.dirname(outfile), exist_ok=True)
    plt.figure(figsize=(6,4))
    if len(idxs) == 0:
        plt.text(0.5, 0.5, "No members", ha="center", va="center", fontsize=14)
        plt.title(title)
        plt.axis("off")
        plt.tight_layout()
        plt.savefig(outfile, dpi=150)
        plt.close()
        return
    for i in idxs:
        plt.plot(tn[i], mn[i], marker='o', alpha=0.25, linewidth=1)
    # Mean over members at each of the three normalized epochs.
    mean_curve = np.nanmean(mn[idxs], axis=0)
    mean_time  = np.nanmean(tn[idxs], axis=0)
    plt.plot(mean_time, mean_curve, marker='o', linewidth=3)
    plt.xlabel("Normalized time")
    plt.ylabel("Magnitude (demeaned)")
    plt.title(title)
    plt.grid(alpha=0.3)
    plt.tight_layout()
    plt.savefig(outfile, dpi=150)
    plt.close()

def run_clustering_and_plots(triple: Table, out_tab: Table,
                             s_sys: float, k: int, random_state: int = 42):
    """
    KMeans-cluster the triple-match 3-point sequences and plot each cluster.
    Adds 'cluster', 'cluster_label' and 'cluster_label_ascii' columns to
    *out_tab* in place.
    Products:
      - lightcurves/cluster_labels.csv
      - lightcurves/cluster_centers.csv
      - lightcurves/cluster_plots/cluster_*.png
    Returns a dict with labels / centers / center_labels, or None when
    scikit-learn is unavailable.
    """
    if not _SKLEARN_OK:
        print("⚠️ 未安装 scikit-learn，跳过聚类与画图。请先安装：pip install scikit-learn")
        return None

    X_raw, Tn, Mn = _build_features_from_triple(triple, s_sys)
    print(f"🧮 聚类样本数: {len(X_raw)}，特征维度: {X_raw.shape[1]}")

    # Standardize so KMeans distances are not dominated by feature scale.
    scaler = StandardScaler()
    Xs = scaler.fit_transform(X_raw)

    # n_init="auto" first; older scikit-learn only accepts an integer.
    try:
        km = KMeans(n_clusters=k, n_init="auto", random_state=random_state)
        labels = km.fit_predict(Xs)
    except TypeError:
        km = KMeans(n_clusters=k, n_init=10, random_state=random_state)
        labels = km.fit_predict(Xs)

    # Map centers back to the raw feature scale before semantic labeling.
    centers_raw = scaler.inverse_transform(km.cluster_centers_)
    center_labels = _label_centers(centers_raw)

    # Write the cluster assignments back onto out_tab.
    out_tab["cluster"] = np.array(labels, dtype=int)
    lab_str = np.array([center_labels[c] for c in labels], dtype="U64")
    out_tab["cluster_label"] = lab_str
    # Also keep an ASCII-safe copy so the table can be written to FITS.
    out_tab["cluster_label_ascii"] = np.array([_to_ascii_safe(s) for s in lab_str], dtype="U64")

    # Separate CSV with just (ra, dec, cluster, cluster_label[_ascii]).
    lab_tab = Table()
    lab_tab["ra"] = out_tab["ra"]
    lab_tab["dec"] = out_tab["dec"]
    lab_tab["cluster"] = out_tab["cluster"]
    lab_tab["cluster_label"] = out_tab["cluster_label"]
    lab_tab["cluster_label_ascii"] = out_tab["cluster_label_ascii"]
    lab_csv = _p("cluster_labels.csv")
    lab_tab.write(lab_csv, format="ascii.csv", overwrite=True)
    print(f"💾 聚类标签 CSV: {os.path.abspath(lab_csv)}")

    # One normalized overlay plot per cluster (empty clusters get a PNG too).
    for c in range(k):
        idxs = np.where(labels == c)[0]
        title = f"Cluster {c} (n={len(idxs)}) - {center_labels[c]}"
        outpng = os.path.join(PLOT_DIR, f"cluster_{c:02d}.png")
        _plot_cluster_overlay(Tn, Mn, idxs, title, outpng)
        print(f"🖼️ 簇 {c} 叠加图: {os.path.abspath(outpng)}")

    # Save the centers (raw feature scale) for inspection.
    centers_tab = Table(names=["s1","s2","curv","curv_S","rt","amp","Smax","label"],
                        dtype=[float,float,float,float,float,float,float,"U64"])
    for i in range(k):
        row = list(centers_raw[i]) + [center_labels[i]]
        centers_tab.add_row(row)
    centers_csv = _p("cluster_centers.csv")
    centers_tab.write(centers_csv, format="ascii.csv", overwrite=True)
    print(f"💾 聚类中心 CSV: {os.path.abspath(centers_csv)}")

    print(f"📂 聚类图片目录: {os.path.abspath(PLOT_DIR)}")
    return dict(labels=labels, centers=centers_raw, center_labels=center_labels)

# ---------------- Anomaly detection (Isolation Forest) ----------------

def _rank01_desc(a):
    """把数组按从大到小转成 [0,1) 排名分位；越大排名越靠前，返回值越接近 1。"""
    a = np.asarray(a, float)
    n = len(a)
    order = np.argsort(-a)          # 大在前
    rank = np.empty(n, dtype=float)
    rank[order] = np.arange(n) / max(n, 1)
    return 1.0 - rank               # 越异常越接近 1

def run_iforest_and_mark(triple: Table, out_tab: Table, s_sys: float,
                         contamination: float = 0.03, random_state: int = 42,
                         topn: int = 50):
    """
    Run Isolation Forest on the 7-D features and write results onto *out_tab*.
      New columns:
        - iforest_outlier   : True/False (model flags the row as anomalous)
        - iforest_score     : anomaly strength (bigger = more anomalous;
                              from -decision_function)
        - iforest_score01   : score min-max normalized to [0, 1]
        - anomaly_hybrid01  : blend with max_S / max_amp rank quantiles
                              (bigger = more variable-like)
    Products:
      - lightcurves/iforest_results.csv
      - lightcurves/var_predicted_by_iforest.csv
      - lightcurves/iforest_scores_hist.png
      - lightcurves/iforest_topN_overlay.png
      - lightcurves/var_iforest.reg (red), var_all_iforest.reg (green)
    Returns a dict with model/scores/predictions, or None when
    scikit-learn is missing or there are no samples.
    """
    try:
        from sklearn.ensemble import IsolationForest
        from sklearn.preprocessing import StandardScaler as _Std2
    except Exception:
        print("⚠️ 未安装 scikit-learn，跳过孤立森林。请先安装：pip install scikit-learn")
        return None

    # 1) Features (same 7-D space as the clustering step).
    X_raw, Tn, Mn = _build_features_from_triple(triple, s_sys)
    if len(X_raw) == 0:
        print("⚠️ 无样本可做孤立森林")
        return None

    scaler = _Std2()
    Xs = scaler.fit_transform(X_raw)

    # 2) Model (fall back for older scikit-learn without n_jobs).
    try:
        clf = IsolationForest(
            n_estimators=400, contamination=contamination,
            random_state=random_state, n_jobs=-1, bootstrap=False
        )
    except TypeError:
        clf = IsolationForest(
            n_estimators=400, contamination=contamination,
            random_state=random_state, bootstrap=False
        )

    preds = clf.fit_predict(Xs)              # -1 = outlier, 1 = inlier

    # decision_function: positive = normal, negative = anomalous; negate it
    # so bigger means more anomalous.
    try:
        df = clf.decision_function(Xs)
        score = -df
    except Exception:
        ss = clf.score_samples(Xs)           # bigger = more normal
        score = (np.max(ss) - ss)            # flipped: bigger = more anomalous

    # Min-max normalize to [0, 1].
    smin, smax = float(np.min(score)), float(np.max(score))
    score01 = (score - smin) / (smax - smin + 1e-12)

    # 3) Write back onto the summary table.
    out_tab["iforest_outlier"] = np.array(preds == -1, dtype=bool)
    out_tab["iforest_score"]   = np.array(score, dtype=float)
    out_tab["iforest_score01"] = np.array(score01, dtype=float)

    # 4) Blend with the physical quantities via rank quantiles (robust to
    #    outliers in the raw values).
    S01   = _rank01_desc(out_tab["max_S"])
    Amp01 = _rank01_desc(out_tab["max_amp"])
    hybrid01 = 0.5*score01 + 0.3*S01 + 0.2*Amp01
    out_tab["anomaly_hybrid01"] = np.array(hybrid01, dtype=float)

    # 5) Export CSVs.
    res_csv = _p("iforest_results.csv")
    _coerce_object_str_columns(out_tab).write(res_csv, format="ascii.csv", overwrite=True)
    print(f"💾 孤立森林结果 CSV: {os.path.abspath(res_csv)}")

    pred_mask = (out_tab["iforest_outlier"] == True)
    pred_csv = _p("var_predicted_by_iforest.csv")
    out_tab[pred_mask].write(pred_csv, format="ascii.csv", overwrite=True)
    print(f"💾 孤立森林预测为变量的清单: {os.path.abspath(pred_csv)}  (count={int(np.sum(pred_mask))})")

    # 6) Histogram of scores & Top-N overlay plot.
    plt.figure(figsize=(6,4))
    plt.hist(score, bins=40, alpha=0.8)
    plt.xlabel("IsolationForest anomaly score (bigger = more anomalous)")
    plt.ylabel("Count")
    plt.title("Anomaly score histogram")
    plt.tight_layout()
    png_hist = _p("iforest_scores_hist.png")
    plt.savefig(png_hist, dpi=150); plt.close()
    print(f"🖼️ 分数直方图: {os.path.abspath(png_hist)}")

    # Top-N rows by the hybrid score.
    idx_sorted = np.argsort(-hybrid01)
    top_idx = idx_sorted[:min(topn, len(idx_sorted))]
    overlay_png = _p("iforest_topN_overlay.png")
    _plot_cluster_overlay(Tn, Mn, top_idx,
                          title=f"Top-{len(top_idx)} anomalies (hybrid)",
                          outfile=overlay_png)
    print(f"🖼️ TopN 异常叠加图: {os.path.abspath(overlay_png)}")

    # 7) DS9 regions (red: iForest outliers; green: everything).
    write_ds9_regions(
        all_tab=out_tab,
        cand_tab=out_tab[pred_mask],
        out_all=_p("var_all_iforest.reg"),
        out_cand=_p("var_iforest.reg")
    )

    return dict(model=clf, score=score, score01=score01, hybrid01=hybrid01, preds=preds)

# ---------------- Main pipeline ----------------

def process_three_catalogs(catA: str, catB: str, catC: str, radius_arcsec: float,
                         sig_sys: float, s_thresh: float, min_amp: float,
                         same_src_sep: float, same_band: bool):
    """Triple-match three catalogs and flag variable candidates.

    Returns (candidate summary table, A, B, C, triple-match table); the
    summary is empty when matching fails at any stage.  A source is a
    candidate when at least two of the three pairwise comparisons satisfy
    S >= s_thresh and |dmag| >= min_amp.
    NOTE(review): same_src_sep and same_band are accepted but currently
    unused in this function.
    """
    a = read_catalog(catA)
    b = read_catalog(catB)
    c = read_catalog(catC)

    print(f"📊 星表 A 源数: {len(a)}")
    print(f"📊 星表 B 源数: {len(b)}")
    print(f"📊 星表 C 源数: {len(c)}")

    matched_AB = xmatch_one_to_one(a, b, radius_arcsec)
    print(f"🔍 A-B 初步匹配数 (<= {radius_arcsec}\"): {len(matched_AB)}")
    matched_AC = xmatch_one_to_one(a, c, radius_arcsec)
    print(f"🔍 A-C 初步匹配数 (<= {radius_arcsec}\"): {len(matched_AC)}")
    
    if len(matched_AB) == 0 or len(matched_AC) == 0:
        print("⚠️ 无足够匹配源进行三重匹配")
        return Table(), a, b, c, Table()

    # De-conflict column names before joining: the A-C match's a_* columns
    # become a2_* (duplicates of A) and its b_* columns become c_*.
    if len(matched_AC) > 0:
        for col in list(matched_AC.colnames):
            if col.startswith('a_'):
                matched_AC.rename_column(col, col.replace('a_', 'a2_', 1))
            elif col.startswith('b_'):
                matched_AC.rename_column(col, col.replace('b_', 'c_', 1))
        if 'sep_arcsec' in matched_AC.colnames:
            matched_AC.rename_column('sep_arcsec', 'sep_AC')
    if 'sep_arcsec' in matched_AB.colnames:
        matched_AB.rename_column('sep_arcsec', 'sep_AB')
    
    print(f"✅ A-B 有效匹配数: {len(matched_AB)}")
    print(f"✅ A-C 有效匹配数: {len(matched_AC)}")
    
    # Inner join on the A-row index: keep only A sources matched in B AND C.
    triple_matched = join(matched_AB, matched_AC, keys="i1", join_type='inner')
    print(f"🌟 三重匹配源数 (A同时匹配B和C): {len(triple_matched)}")
    if len(triple_matched) == 0:
        return Table(), a, b, c, triple_matched

    # Drop the duplicated A columns carried over from the A-C match.
    cols_to_remove = [col for col in triple_matched.colnames if col.startswith('a2_')]
    if cols_to_remove:
        triple_matched.remove_columns(cols_to_remove)
    
    # Pairwise magnitude differences and their significances; sig_sys is
    # added in quadrature to each epoch's error (hence the 2*sig_sys^2).
    dmag_AB = triple_matched["b_mag"] - triple_matched["a_mag"]
    sig_AB = np.sqrt(triple_matched["a_magerr"]**2 + triple_matched["b_magerr"]**2 + 2*sig_sys**2)
    S_AB = np.abs(dmag_AB) / np.where(sig_AB > 0, sig_AB, np.nan)
    
    dmag_AC = triple_matched["c_mag"] - triple_matched["a_mag"]
    sig_AC = np.sqrt(triple_matched["a_magerr"]**2 + triple_matched["c_magerr"]**2 + 2*sig_sys**2)
    S_AC = np.abs(dmag_AC) / np.where(sig_AC > 0, sig_AC, np.nan)
    
    dmag_BC = triple_matched["c_mag"] - triple_matched["b_mag"]
    sig_BC = np.sqrt(triple_matched["b_magerr"]**2 + triple_matched["c_magerr"]**2 + 2*sig_sys**2)
    S_BC = np.abs(dmag_BC) / np.where(sig_BC > 0, sig_BC, np.nan)
    
    # Candidate: at least two of the three pairs pass both thresholds.
    cond_AB = (S_AB >= s_thresh) & (np.abs(dmag_AB) >= min_amp)
    cond_AC = (S_AC >= s_thresh) & (np.abs(dmag_AC) >= min_amp)
    cond_BC = (S_BC >= s_thresh) & (np.abs(dmag_BC) >= min_amp)
    is_candidate = (cond_AB.astype(int) + cond_AC.astype(int) + cond_BC.astype(int)) >= 2
    cand_num = int(np.sum(is_candidate))
    
    # Summary table (one row per triple-matched source).
    out = Table()
    out["ra"] = triple_matched["a_ra"]
    out["dec"] = triple_matched["a_dec"]
    out["sep_AB"] = triple_matched["sep_AB"]
    out["sep_AC"] = triple_matched["sep_AC"]
    out["mag_A"] = triple_matched["a_mag"]
    out["mag_B"] = triple_matched["b_mag"]
    out["mag_C"] = triple_matched["c_mag"]
    out["dmag_AB"] = dmag_AB
    out["dmag_AC"] = dmag_AC
    out["dmag_BC"] = dmag_BC
    out["S_AB"] = S_AB
    out["S_AC"] = S_AC
    out["S_BC"] = S_BC
    out["max_S"] = np.max([S_AB, S_AC, S_BC], axis=0)
    out["max_amp"] = np.max([np.abs(dmag_AB), np.abs(dmag_AC), np.abs(dmag_BC)], axis=0)
    # 'S'/'dmag' aliases are what write_ds9_regions annotates with.
    out["S"] = out["max_S"]
    out["dmag"] = out["max_amp"]
    out["is_candidate"] = is_candidate
    
    print(f"🌟 变量候选数 (至少两对满足 S>={s_thresh}, |Δm|>={min_amp}): {cand_num}")

    # Light-curve URLs for candidates only (empty string otherwise).
    out["url"] = np.array([
        _build_lightcurve_url(ra_deg, dec_deg) if bool(flag) else ""
        for ra_deg, dec_deg, flag in zip(out["ra"], out["dec"], out["is_candidate"])
    ], dtype="U256")

    # Save the triple-match table (CSV, plus an ASCII-safe FITS copy).
    triple_fits = _p("triple_matched_catalog.fits")
    triple_csv = _p("triple_matched_catalog.csv")
    triple_matched.write(triple_csv, format="ascii.csv", overwrite=True)
    triple_to_fits = _fits_safe_table(triple_matched, width=256)
    triple_to_fits.write(triple_fits, format="fits", overwrite=True)
    print(f"💾 三重匹配星表 CSV:  {os.path.abspath(triple_csv)}")
    print(f"💾 三重匹配星表 FITS: {os.path.abspath(triple_fits)}")

    return out, a, b, c, triple_matched

def main():
    """Command-line entry point.

    Triple-matches the first three catalogs from the list file, flags
    variable candidates, optionally clusters the 3-point sequences and
    runs Isolation Forest anomaly detection, then writes all products
    (CSV/FITS/region/PNG/URL list) under lightcurves/.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--list", default="F:\\tools_mephisto\\images\\1278305\\images_u.list")
    ap.add_argument("--radius", type=float, default=2.0)
    ap.add_argument("--same-src-sep", type=float, default=1.0)
    # NOTE(review): without action="store_true", --same-band takes a string
    # value; the default True is what process_three_catalogs receives.
    ap.add_argument("--same-band", default=True)
    ap.add_argument("--sig-sys", type=float, default=SIG_SYS_DEFAULT)
    ap.add_argument("--s-thresh", type=float, default=5.0)
    ap.add_argument("--min-amp", type=float, default=0.3)
    ap.add_argument("--out", default="var_candidates.csv")
    # Clustering options.
    # NOTE(review): default=True without action="store_true" means this flag
    # is truthy unless an empty string is passed on the command line.
    ap.add_argument("--do-cluster", default=True, help="启用 3 点序列聚类与画图")
    ap.add_argument("--cluster-k", type=int, default=20, help="聚类簇数")
    ap.add_argument("--cluster-seed", type=int, default=42)
    # Anomaly detection (Isolation Forest).
    ap.add_argument("--do-iforest", action="store_true", help="使用孤立森林做异常检测")
    ap.add_argument("--iforest-contam", type=float, default=0.03, help="异常占比(0~0.5)")
    ap.add_argument("--iforest-topn", type=int, default=50, help="叠加图显示的TopN异常个数")

    args = ap.parse_args()

    _ensure_outdir()

    paths = read_list_file(args.list)
    if len(paths) < 3:
        raise RuntimeError("list 文件必须至少包含三个 fits 路径（当前只找到 {} 个)".format(len(paths)))

    catA, catB, catC = paths[0], paths[1], paths[2]
    print(f"读取三重匹配:\n  A: {catA}\n  B: {catB}\n  C: {catC}")

    res, a, b, c, triple_matched = process_three_catalogs(
        catA, catB, catC,
        radius_arcsec=args.radius,
        sig_sys=args.sig_sys,
        s_thresh=args.s_thresh,
        min_amp=args.min_amp,
        same_src_sep=args.same_src_sep,
        same_band=args.same_band
    )

    if len(res) == 0:
        print("⚠️ 无结果")
        return

    # ====== Clustering and per-cluster plots ======
    if args.do_cluster:
        run_clustering_and_plots(
            triple=triple_matched,
            out_tab=res,
            s_sys=args.sig_sys,
            k=args.cluster_k,
            random_state=args.cluster_seed
        )
        # Save the cluster-annotated table as CSV (no encoding issues there).
        res_csv = _p("var_all_with_cluster.csv")
        res_to_save = _coerce_object_str_columns(res, width=256)
        res_to_save.write(res_csv, format="ascii.csv", overwrite=True)
        print(f"💾 所有匹配结果（含聚类列）CSV: {os.path.abspath(res_csv)}")

    # ====== Anomaly detection (Isolation Forest) ======
    # Bug fix: this previously read `if True:`, running the Isolation Forest
    # unconditionally and ignoring the --do-iforest flag defined above.
    if args.do_iforest:
        run_iforest_and_mark(
            triple=triple_matched,
            out_tab=res,             # scores/labels are written back onto res
            s_sys=args.sig_sys,
            contamination=args.iforest_contam,
            random_state=42,
            topn=args.iforest_topn
        )
        # Save the table once more with the iForest columns included.
        res_if_csv = _p("var_all_with_iforest.csv")
        _coerce_object_str_columns(res, 256).write(res_if_csv, format="ascii.csv", overwrite=True)
        print(f"💾 所有匹配结果（含 iForest）CSV: {os.path.abspath(res_if_csv)}")

    # ====== Candidates (CSV + FITS) and the URL list ======
    cands = res[res["is_candidate"] == True]
    out_csv  = _p(args.out)
    out_fits = _p(os.path.splitext(args.out)[0] + ".fits")
    cands.write(out_csv,  format="ascii.csv", overwrite=True)
    cands_to_fits = _fits_safe_table(cands, width=256)
    cands_to_fits.write(out_fits, format="fits", overwrite=True)
    print(f"✅ 候选 CSV:  {os.path.abspath(out_csv)}")
    print(f"✅ 候选 FITS: {os.path.abspath(out_fits)}")
    if len(cands) > 0:
        print("示例候选（前5条）：")
        for i in range(min(5, len(cands))):
            print(f"  RA={cands['ra'][i]:.6f}  DEC={cands['dec'][i]:+.6f}  "
                  f"max_S={cands['max_S'][i]:.1f}  max_amp={cands['max_amp'][i]:.3f}  URL={cands['url'][i]}")

    # Plain-text URL list (candidates only).
    url_list_path = _p("var_candidate_urls.txt")
    with open(url_list_path, "w", encoding="utf-8") as f:
        for url in cands["url"]:
            if isinstance(url, str) and url:
                f.write(url + "\n")
    print(f"🔗 候选 URL 清单: {os.path.abspath(url_list_path)}")

    # DS9 regions -> lightcurves/
    write_ds9_regions(
        all_tab=res,
        cand_tab=cands,
        out_all=_p("var_all_matches.reg"),
        out_cand=_p("var_candidates.reg")
    )

    # Unmatched sources (A vs B, A vs C); the join renames i2 -> i2_1/i2_2.
    a_only_AB, b_only = split_matched_unmatched(a, b, triple_matched, i1_name="i1", i2_name="i2_1")
    a_only_AC, c_only = split_matched_unmatched(a, c, triple_matched, i1_name="i1", i2_name="i2_2")

    if len(a_only_AB) == 0 and len(a_only_AC) == 0:
        a_only = Table()
    elif len(a_only_AB) == 0:
        a_only = a_only_AC
    elif len(a_only_AC) == 0:
        a_only = a_only_AB
    else:
        a_only = join(a_only_AB, a_only_AC, keys="a_row", join_type='outer')
        # Merge the suffixed ra/dec columns the outer join may produce.
        def _merge_cols(tab: Table, c1: str, c2: str, outc: str):
            """Coalesce *c1*/*c2* into *outc*, preferring finite c1 values."""
            if c1 in tab.colnames and c2 in tab.colnames:
                arr = np.array(tab[c1], dtype=float)
                mask = ~np.isfinite(arr)
                arr2 = np.array(tab[c2], dtype=float)
                arr[mask] = arr2[mask]
                tab.remove_columns([c1, c2])
                tab[outc] = arr
            elif c1 in tab.colnames and outc not in tab.colnames:
                tab.rename_column(c1, outc)
            elif c2 in tab.colnames and outc not in tab.colnames:
                tab.rename_column(c2, outc)
        _merge_cols(a_only, "ra_1", "ra_2", "ra")
        _merge_cols(a_only, "dec_1", "dec_2", "dec")

    write_ds9_unmatched(
        a_only, b_only,
        out_a=_p("unmatched_A_vs_B.reg"),
        out_b=_p("unmatched_B.reg")
    )
    write_ds9_unmatched(
        a_only, c_only,
        out_a=_p("unmatched_A_vs_C.reg"),
        out_b=_p("unmatched_C.reg")
    )

if __name__ == "__main__":
    main()
