#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations
from typing import Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
import astropy.units as u
from astropy.coordinates import SkyCoord
from astroquery.vizier import Vizier


class VariableCatalogAggregator:
    """
    Positional cross-match against multiple variable-star catalogs on VizieR.

    Every hit is normalized to a common record
    ``(catalog, name_or_id, ra, dec, var_type, period_day, amplitude)``,
    then the per-catalog results are merged and de-duplicated on the sky.

    Covered catalogs: VSX, GCVS (several sub-tables), Gaia DR3 varisum
    (+ a few class-specific sub-tables), ASAS-SN, ZTF periodic variables,
    ATLAS, NSVS, LINEAR, and two OGLE-IV tables (see ``CATALOGS``).
    """

    # ---- Catalogs to query (key = alias used in the output, value = VizieR
    #      catalog identifier). Add or remove entries as needed. ----
    CATALOGS: Dict[str, str] = {
        # Core catalogs
        "VSX": "B/vsx/vsx",
        "GaiaDR3_varisum": "I/358/varisum",
        "ASAS-SN": "II/366/catv2021",
        "ZTF_periodic": "J/ApJS/249/18",
        # GCVS family
        "GCVS_cat": "B/gcvs/gcvs_cat",   # main catalog (version 5.1)
        "GCVS_rem": "B/gcvs/gcvs_rem",   # remarks
        "GCVS_xid": "B/gcvs/crossid",    # cross-identifications
        "NSV_cat":  "B/gcvs/nsv_cat",    # NSV + supplement
        "NSV_rem":  "B/gcvs/nsv_rem",
        "EVS_cat":  "B/gcvs/evs_cat",
        "EVS_rem":  "B/gcvs/evs_rem",
        # Other common survey catalogs (enable/disable as needed)
        "GaiaDR3_RRLyrae": "I/358/vari_rrlyrae",
        "GaiaDR3_Cepheid": "I/358/vari_cepheid",
        "GaiaDR3_LongP":   "I/358/vari_longp",
        "ATLAS_var":       "J/ApJS/249/3",
        "NSVS_main":       "II/270/nsvs",
        "LINEAR_vars":     "J/AJ/147/142",
        # OGLE examples (OGLE has many sub-tables; extend as required)
        "OGLE_IV_Cep":     "J/AcA/68/1",     # Cepheids
        "OGLE_IV_RRL":     "J/AcA/68/315",   # RR Lyrae
    }

    # Candidate column names for each quantity (first match in the table wins;
    # the lists deliberately over-cover to be tolerant of per-table naming).
    # NOTE(review): "logP" / "Pls" in PER_CANDS are taken verbatim as a period
    # in days; if a table actually stores log10(P), period_day will be wrong
    # for that table — confirm against the specific VizieR table description.
    TYPE_CANDS = ["VarType", "Type", "otype", "class", "Class", "Cls", "OTYPE", "Best_Class",
                  "best_class_name", "GCVS", "GCVStype", "type"]
    PER_CANDS  = ["Per", "Period", "PERIOD", "P", "p", "logP", "Pls", "P1", "P0"]
    FREQ_CANDS = ["Freq", "FREQ", "frequency", "Frequency"]
    AMP_CANDS  = ["Amp", "Amplitude", "A", "AmpG", "RANGE", "range", "Gamp", "AmpF", "Amp_R", "Amp_V"]
    NAME_CANDS = ["Name", "MainID", "ID", "ID1", "ID2", "Source", "Source_ID", "source_id",
                  "VSX", "GCVS", "NSV", "OGLE", "ZTF", "ATLAS_ID", "NSVS_ID", "Object", "objID"]

    # RA/Dec column-name candidates (common VizieR conventions)
    RA_CANDS = ["RAJ2000", "RA_ICRS", "RAdeg", "_RAJ2000", "RA"]
    DE_CANDS = ["DEJ2000", "DE_ICRS", "DEdeg", "_DEJ2000", "DE", "Dec"]

    def __init__(self, row_limit: int = 5000, timeout: int = 60):
        """
        Parameters
        ----------
        row_limit : int
            Maximum number of rows returned per VizieR query.
        timeout : int
            Per-request timeout in seconds.
        """
        self.row_limit = row_limit
        self.timeout = timeout
        # astroquery exposes these as class-level settings on Vizier.
        Vizier.ROW_LIMIT = row_limit
        # Fix: `timeout` used to be stored but never applied to the client.
        Vizier.TIMEOUT = timeout
        # For very wide tables, restrict columns via Vizier(columns=[...]).

    @staticmethod
    def _first_col(cols: List[str], candidates: List[str]) -> Optional[str]:
        """Return the first candidate present in `cols`, or None if absent."""
        for cand in candidates:
            if cand in cols:
                return cand
        return None

    @staticmethod
    def _to_float_safe(x):
        """Convert `x` to float, returning NaN on any failure.

        The broad `except` is deliberate: besides ValueError/TypeError,
        masked table cells can raise numpy's MaskError on conversion.
        """
        try:
            return float(x)
        except Exception:
            return np.nan

    def _table_to_rows(self, tbl, tag: str) -> List[dict]:
        """Normalize one astropy.table.Table into a list of plain dicts.

        Column names are resolved once per table via the *_CANDS lists; any
        quantity whose column is absent becomes NaN (numeric) or "" (text).
        """
        rows = []
        cols = list(tbl.colnames)
        ra_col = self._first_col(cols, self.RA_CANDS)
        de_col = self._first_col(cols, self.DE_CANDS)
        name_col = self._first_col(cols, self.NAME_CANDS)
        type_col = self._first_col(cols, self.TYPE_CANDS)
        per_col = self._first_col(cols, self.PER_CANDS)
        freq_col = self._first_col(cols, self.FREQ_CANDS)
        amp_col = self._first_col(cols, self.AMP_CANDS)

        for r in tbl:
            ra = self._to_float_safe(r[ra_col]) if ra_col else np.nan
            de = self._to_float_safe(r[de_col]) if de_col else np.nan
            name = str(r[name_col]) if name_col else ""
            vtype = str(r[type_col]) if type_col else ""
            period = np.nan
            if per_col:
                period = self._to_float_safe(r[per_col])
            elif freq_col:
                # Fall back to frequency: P = 1/f, guarding zero/NaN/negative.
                f = self._to_float_safe(r[freq_col])
                period = 1.0 / f if f and np.isfinite(f) and f > 0 else np.nan
            amp = np.nan
            if amp_col:
                amp = self._to_float_safe(r[amp_col])

            rows.append(dict(
                catalog=tag, name_or_id=name, ra=ra, dec=de,
                var_type=vtype, period_day=period, amplitude=amp
            ))
        return rows

    def query_one_catalog(self, coord: SkyCoord, radius: u.Quantity, tag: str, cat_id: str) -> pd.DataFrame:
        """Query one catalog by position and return a normalized DataFrame.

        On a query failure (network, bad catalog id, ...) a single sentinel
        row is returned whose name_or_id starts with "[QUERY_ERROR]" and whose
        coordinates are NaN, so the failure stays visible in the merged table.
        """
        try:
            res = Vizier.query_region(coord, radius=radius, catalog=cat_id)
        except Exception as e:
            # Broad catch is intentional: astroquery raises a variety of
            # transport- and parsing-level exceptions.
            return pd.DataFrame([{
                "catalog": tag, "name_or_id": f"[QUERY_ERROR] {e}",
                "ra": np.nan, "dec": np.nan, "var_type": "", "period_day": np.nan, "amplitude": np.nan
            }])
        if not res:
            return pd.DataFrame(columns=["catalog","name_or_id","ra","dec","var_type","period_day","amplitude"])

        rows = []
        for t in res:
            rows.extend(self._table_to_rows(t, tag))
        df = pd.DataFrame(rows)
        # Drop rows that are entirely empty.
        if not df.empty:
            df = df.dropna(how="all")
        return df

    @staticmethod
    def _sky_match_dedup(df: pd.DataFrame, tol_arcsec: float = 2.0) -> pd.DataFrame:
        """
        De-duplicate by sky position: entries (possibly from different
        catalogs) closer than `tol_arcsec` are treated as the same source.

        Strategy: coarse grid binning on ra/dec, then pairwise angular
        separation within each bin. From each cluster keep one row,
        preferring (1) a finite period, then (2) catalog priority.

        Known limitation: sources falling in adjacent grid cells are never
        compared, so pairs within `tol_arcsec` that straddle a cell boundary
        may survive as duplicates. Rows without finite coordinates (e.g. the
        "[QUERY_ERROR]" sentinels) are passed through unchanged — previously
        they were silently dropped because pandas groupby discards NaN keys.
        """
        if df.empty:
            return df

        # Catalog priority: VSX > GCVS > Gaia variability > other surveys.
        cat_rank = {
            "VSX": 0, "GCVS_cat": 1, "GCVS_xid": 2, "GCVS_rem": 3,
            "GaiaDR3_varisum": 4, "GaiaDR3_RRLyrae": 5, "GaiaDR3_Cepheid": 6, "GaiaDR3_LongP": 7,
            "ASAS-SN": 8, "ZTF_periodic": 9, "ATLAS_var": 10, "NSV_cat": 11,
            # Fix: key was previously "NSV_main", which never matched the
            # "NSVS_main" catalog alias (it fell through to rank 99).
            "NSVS_main": 12,
            "LINEAR_vars": 13, "OGLE_IV_Cep": 14, "OGLE_IV_RRL": 15,
            "NSV_rem": 16, "EVS_cat": 17, "EVS_rem": 18
        }
        df = df.copy()
        df["cat_rank"] = df["catalog"].map(cat_rank).fillna(99).astype(int)
        df["hasP"] = df["period_day"].notna() & np.isfinite(df["period_day"])

        # Split off rows with unusable coordinates; they bypass clustering.
        finite = (np.isfinite(df["ra"].to_numpy(dtype=float))
                  & np.isfinite(df["dec"].to_numpy(dtype=float)))
        no_pos = df[~finite]
        pos = df[finite].copy()

        # Coarse binning at tol/2 (degrees), refined by true separations below.
        cell = (tol_arcsec / 2.0) / 3600.0
        pos["_gx"] = np.floor(pos["ra"].values / cell)
        pos["_gy"] = np.floor((pos["dec"].values + 90.0) / cell)

        keep_rows = []
        for _, g in pos.groupby(["_gx", "_gy"]):
            if len(g) == 1:
                keep_rows.append(g.iloc[0])
                continue
            # Pairwise comparison within the bin: simple greedy clustering.
            coords = SkyCoord(g["ra"].values * u.deg, g["dec"].values * u.deg)
            used = np.zeros(len(g), dtype=bool)
            for i in range(len(g)):
                if used[i]:
                    continue
                sep = coords[i].separation(coords).arcsec
                same = np.where(sep <= tol_arcsec)[0]
                cluster = g.iloc[same].copy()
                used[same] = True
                # Keep order: 1) has a period 2) catalog priority 3) arbitrary.
                cluster = cluster.sort_values(by=["hasP", "cat_rank"], ascending=[False, True])
                keep_rows.append(cluster.iloc[0])

        kept = pd.DataFrame(keep_rows)
        if not kept.empty:
            kept = kept.drop(columns=["cat_rank", "hasP", "_gx", "_gy"])
        out = pd.concat([kept, no_pos.drop(columns=["cat_rank", "hasP"])],
                        ignore_index=True)
        return out.reset_index(drop=True)

    def query_all(
        self,
        ra_deg: float,
        dec_deg: float,
        radius_arcsec: float = 5.0,
        catalogs: Optional[Dict[str, str]] = None,
        sky_dedup_arcsec: float = 2.0,
    ) -> pd.DataFrame:
        """
        Run the positional search against every catalog and merge the results.

        Parameters
        ----------
        ra_deg, dec_deg : float
            ICRS position in degrees.
        radius_arcsec : float
            Search radius in arcseconds.
        catalogs : dict, optional
            Alias -> VizieR id mapping; defaults to ``self.CATALOGS``.
        sky_dedup_arcsec : float
            Cross-catalog de-duplication tolerance in arcseconds.

        Returns
        -------
        pd.DataFrame with columns:
            catalog, name_or_id, ra, dec, var_type, period_day, amplitude
        """
        Vizier.ROW_LIMIT = self.row_limit
        coord = SkyCoord(ra=ra_deg * u.deg, dec=dec_deg * u.deg, frame="icrs")
        radius = radius_arcsec * u.arcsec

        cats = catalogs or self.CATALOGS
        dfs = []
        for tag, cat_id in cats.items():
            df = self.query_one_catalog(coord, radius, tag, cat_id)
            if not df.empty:
                dfs.append(df)

        if not dfs:
            return pd.DataFrame(columns=["catalog","name_or_id","ra","dec","var_type","period_day","amplitude"])

        merged = pd.concat(dfs, ignore_index=True)

        # Exact-duplicate removal, then positional de-duplication.
        merged = merged.drop_duplicates(subset=["catalog","name_or_id","ra","dec","period_day"], keep="first")
        merged = self._sky_match_dedup(merged, tol_arcsec=sky_dedup_arcsec)

        # Order: rows with a period first, then by catalog and identifier.
        merged["hasP"] = merged["period_day"].notna() & np.isfinite(merged["period_day"])
        merged = merged.sort_values(by=["hasP","catalog","name_or_id"], ascending=[False, True, True]).drop(columns=["hasP"])
        return merged


# ---------------- Usage example ----------------
if __name__ == "__main__":
    # Target position in ICRS degrees; edit to the source of interest.
    target_ra_deg = 34.8366
    target_dec_deg = -2.9776

    aggregator = VariableCatalogAggregator(row_limit=10000)
    # Search radius in arcsec; tune as needed (typically 1-10").
    matches = aggregator.query_all(target_ra_deg, target_dec_deg, radius_arcsec=5)

    print(matches.head(30).to_string(index=False))
    # Persist the merged table.
    matches.to_csv("var_match_vizier.csv", index=False)
    print("已保存：var_match_vizier.csv")
