#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from astroquery.vizier import Vizier
from astropy.coordinates import SkyCoord
import astropy.units as u
import pandas as pd
import numpy as np

# Well-known variable-star catalogs (VizieR catalog identifiers).
CATALOGS = {
    "VSX": "B/vsx/vsx",                # AAVSO Variable Star Index (most authoritative compilation)
    "GaiaDR3_varisum": "I/358/varisum",# Gaia DR3 variability summary table (vari_summary)
    "ASAS-SN": "II/366/catv2021",      # ASAS-SN variable star catalog
    "ZTF_periodic": "J/ApJS/249/18"    # ZTF periodic variable catalog (Chen+ 2020)
}

# Candidate column names for type / period / amplitude across the different
# catalogs (defensive matching so column-name differences don't lose data).
TYPE_CANDIDATES = [
    "VarType", "Type", "otype", "class", "Class", "Cls", "OTYPE", "Best_Class", "best_class_name"
]
# NOTE(review): "logP" is log10(period) and "freq"/"Frequency" are frequencies
# (d^-1), not periods in days — confirm that consumers convert them.
PERIOD_CANDIDATES = [
    "Per", "Period", "PERIOD", "P", "p", "logP", "freq", "Frequency"
]
AMP_CANDIDATES = [
    "Amp", "Amplitude", "A", "AmpG", "RANGE", "range", "Gamp"
]

def _first_col_that_exists(table, candidates):
    for c in candidates:
        if c in table.colnames:
            return c
    return None

def _safe_float(value, default=None):
    """Best-effort float conversion; returns *default* for masked/invalid values."""
    try:
        return float(value)
    except Exception:
        return default


def query_variability_by_position(ra_deg, dec_deg, radius_arcsec=5.0, row_limit=5000):
    """
    Cone-search several variable-star catalogs around one position.

    Parameters
    ----------
    ra_deg, dec_deg : float
        ICRS position in degrees.
    radius_arcsec : float, optional
        Search radius in arcseconds (default 5").
    row_limit : int, optional
        Maximum rows per VizieR query. NOTE: this sets the module-global
        ``Vizier.ROW_LIMIT`` as a side effect.

    Returns
    -------
    pandas.DataFrame
        One row per matched catalog entry with columns
        catalog / name_or_id / ra / dec / type / period_day / amplitude,
        de-duplicated and sorted (VSX first, entries with a period first).
        A failed catalog query contributes a single placeholder row whose
        ``type`` starts with ``[QUERY_ERROR]`` instead of aborting the
        remaining catalogs.
    """
    Vizier.ROW_LIMIT = row_limit
    coord = SkyCoord(ra=ra_deg * u.deg, dec=dec_deg * u.deg, frame="icrs")
    radius = radius_arcsec * u.arcsec

    rows = []
    for tag, cat in CATALOGS.items():
        try:
            res = Vizier.query_region(coord, radius=radius, catalog=cat)
            if len(res) == 0:
                continue
            # Some catalogs return several sub-tables; process each one.
            for tbl in res:
                # Locate type / period / amplitude columns (tolerant matching).
                tcol = _first_col_that_exists(tbl, TYPE_CANDIDATES)
                pcol = _first_col_that_exists(tbl, PERIOD_CANDIDATES)
                acol = _first_col_that_exists(tbl, AMP_CANDIDATES)

                # Normalize RA/Dec/identifier column names (best effort).
                ra_col = _first_col_that_exists(tbl, ["RAJ2000","RA_ICRS","RAdeg","_RAJ2000","RA"])
                de_col = _first_col_that_exists(tbl, ["DEJ2000","DE_ICRS","DEdeg","_DEJ2000","Dec","DE"])
                id_col = _first_col_that_exists(tbl, ["Name","ID","Source","Source_ID","source_id","MainID","VSX","recno"])

                for row in tbl:
                    # Masked/invalid cells must not abort the whole catalog,
                    # hence the safe conversions instead of bare float().
                    ra_v = _safe_float(row[ra_col], np.nan) if ra_col else np.nan
                    de_v = _safe_float(row[de_col], np.nan) if de_col else np.nan
                    name = str(row[id_col]) if id_col else ""
                    vtype = str(row[tcol]) if tcol else ""

                    # Period, normalized to days. Some catalogs publish
                    # frequency (d^-1) or log10(period) instead of a plain
                    # period, so convert according to which column matched.
                    period_val = None
                    if pcol:
                        raw = _safe_float(row[pcol])
                        if raw is not None:
                            if pcol in ("freq", "Frequency"):
                                period_val = 1.0 / raw if raw > 0 else None
                            elif pcol == "logP":
                                period_val = 10.0 ** raw
                            else:
                                period_val = raw
                    else:
                        # Fallback: frequency columns not covered above.
                        fcol = _first_col_that_exists(tbl, ["Freq", "frequency"])
                        if fcol:
                            freq = _safe_float(row[fcol])
                            if freq is not None and freq > 0:
                                period_val = 1.0 / freq

                    amp_val = _safe_float(row[acol]) if acol else None

                    rows.append({
                        "catalog": tag,
                        "name_or_id": name,
                        "ra": ra_v,
                        "dec": de_v,
                        "type": vtype,
                        "period_day": period_val,
                        "amplitude": amp_val
                    })
        except Exception as e:
            # One failing catalog must not break the others; record the error.
            rows.append({
                "catalog": tag,
                "name_or_id": "",
                "ra": np.nan, "dec": np.nan,
                "type": f"[QUERY_ERROR] {e}",
                "period_day": None, "amplitude": None
            })

    df = pd.DataFrame(rows)
    if not df.empty:
        # De-duplicate approximately on catalog + identifier + period.
        df = df.drop_duplicates(subset=["catalog","name_or_id","period_day"], keep="first")
        # Sort: VSX first, then Gaia, ASAS-SN, ZTF; rows with a period first.
        cat_order = {"VSX": 0, "GaiaDR3_varisum": 1, "ASAS-SN": 2, "ZTF_periodic": 3}
        df["cat_rank"] = df["catalog"].map(cat_order).fillna(9)
        df["hasP"] = df["period_day"].notna().astype(int)
        df = df.sort_values(["cat_rank","hasP"], ascending=[True, False]).drop(columns=["cat_rank","hasP"])
    return df


if __name__ == "__main__":
    # 示例：你之前提到的近似坐标 06:10:38 +21:38:42
    ra_deg = 34.8366  #15*(6 + 10/60 + 38/3600.0)   # 约 92.6583°
    dec_deg = -2.9776 #21 + 38/60 + 42/3600.0      # 约 21.645°

    df = query_variability_by_position(ra_deg, dec_deg, radius_arcsec=5.0)
    print(df.head(20).to_string(index=False))

    # 是否是变星的经验判断：
    # 1) VSX 命中 => 基本可以认为是已知变星
    # 2) GaiaDR3_varisum 命中 => Gaia 判为“变量性样本”，再看其它表/类型字段
    # 3) ASAS-SN / ZTF 命中且有类型/周期 => 很可能是变星
    if not df.empty:
        is_variable = any(df["catalog"].eq("VSX")) or any(df["catalog"].eq("ASAS-SN")) or any(df["catalog"].eq("ZTF_periodic")) or any(df["catalog"].eq("GaiaDR3_varisum"))
        print("\n判定（经验）：", "变星 ✅" if is_variable else "未见变星证据 ❓")
        # 如果要保存
        df.to_csv("variability_match.csv", index=False)
        print("已保存：variability_match.csv")
    else:
        print("在给定半径内未匹配到变星星表记录。")
