#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from __future__ import annotations
import math
import sqlite3
from typing import Iterable, Tuple, Optional, List
import numpy as np
import pandas as pd
from clickhouse_driver import Client
from astropy.coordinates import SkyCoord
import astropy.units as u
from main_variable_virize_v2 import VariableCatalogAggregator

# === Reuse of the existing catalog aggregator ===
# VariableCatalogAggregator is imported above from main_variable_virize_v2.
# If the class lives in a different module in your project, adjust that
# import path accordingly.

# ========== Configuration ==========
# ClickHouse connection for the observation catalog.
CLICKHOUSE_HOST = '192.168.16.207'
CLICKHOUSE_PORT = 9000
CLICKHOUSE_USER = 'default'
# NOTE(review): hard-coded credential committed to source — move to an
# environment variable or a secrets store.
CLICKHOUSE_PASS = 'Mephisto.2022!'
DB  = 'astronomy_catalog_V20250303_KM'
OBS_TABLE = f'{DB}.star_catalog'

# Local SQLite file that accumulates the per-position variability verdicts.
SQLITE_PATH = 'var_flags.sqlite'

# 1 arcsec merge/match threshold (both for local dedup and catalog matching).
DUP_ARCSEC = 1.0
# Catalog search radius — intentionally larger than the threshold so that
# candidates are retrieved first and then refined down to <=1" matches.
VIZIER_RADIUS_ARCSEC = 5.0

# ClickHouse page size for LIMIT/OFFSET batching (tune up or down as needed).
BATCH = 1000

# ========== SQLite schema ==========
# One row per sky position with a 0/1 variability verdict. Rows closer than
# 1" to an existing row are merged by insert_or_update rather than inserted.
# (The SQL text below, including its inline comments, is executed verbatim.)
CREATE_VAR_FLAG = """
CREATE TABLE IF NOT EXISTS var_flag (
    id           INTEGER PRIMARY KEY AUTOINCREMENT,
    ra_deg       REAL NOT NULL,
    dec_deg      REAL NOT NULL,
    is_variable  INTEGER NOT NULL,      -- 0/1
    catalogs_hit TEXT,                  -- 命中的目录（逗号分隔）
    reason       TEXT,                  -- 简要原因，如 "≤1\" match: VSX, GaiaDR3_varisum"
    checked_at   TEXT DEFAULT (datetime('now'))
);
"""
# Composite (ra, dec) index backing the bounding-box prefilter used by
# find_existing_within_1arcsec.
CREATE_INDEX_RADEC = "CREATE INDEX IF NOT EXISTS idx_varflag_radec ON var_flag(ra_deg, dec_deg);"

def ensure_sqlite(conn: sqlite3.Connection) -> None:
    """Create the var_flag table and its (ra, dec) index if they are missing."""
    for statement in (CREATE_VAR_FLAG, CREATE_INDEX_RADEC):
        conn.execute(statement)
    conn.commit()


# ========== 工具 ==========
def angsep_arcsec(ra1_deg: float, dec1_deg: float, ra2_deg: float, dec2_deg: float) -> float:
    """Small-angle angular separation (arcsec) — sufficient for ~1" decisions.

    Uses the flat-sky approximation sqrt(ddec^2 + (cos(dec_mean)*dra)^2),
    which is accurate for the sub-arcminute separations this pipeline cares
    about.

    Fix vs. previous version: the RA difference is wrapped into [-180, 180]
    degrees, so pairs straddling the RA = 0/360 boundary (e.g. 359.9999 vs
    0.0001) no longer report a ~360-degree separation.
    """
    # Wrap delta-RA into [-180, 180] deg before converting to radians.
    dra = math.radians((ra2_deg - ra1_deg + 180.0) % 360.0 - 180.0)
    ddec = math.radians(dec2_deg - dec1_deg)
    # Scale RA offsets by cos of the mean declination (great-circle shrink).
    cd = math.cos(math.radians((dec1_deg + dec2_deg) * 0.5))
    return math.degrees(math.hypot(ddec, cd * dra)) * 3600.0


def find_existing_within_1arcsec(conn: sqlite3.Connection, ra_deg: float, dec_deg: float,
                                 dup_arcsec: Optional[float] = None) -> Optional[int]:
    """
    Return the id of an existing var_flag row within ``dup_arcsec`` of
    (ra_deg, dec_deg), or None if there is no such row.

    Strategy: a rectangular bounding-box prefilter on the indexed (ra, dec)
    columns, followed by an exact small-angle separation test — adequate for
    catalogs of a few hundred thousand rows.

    Parameters
    ----------
    conn : open SQLite connection with the var_flag table present.
    ra_deg, dec_deg : target position in degrees.
    dup_arcsec : match radius in arcsec; defaults to the module-level
        DUP_ARCSEC (1") for backward compatibility.

    Fixes vs. previous version:
    - The RA half-width of the prefilter box is widened by 1/cos(dec); the
      old fixed pad missed genuine <=1" matches at high declination.
    - The RA box is split in two when it crosses the 0/360 boundary, so
      sources near RA = 0 can still be matched.
    """
    if dup_arcsec is None:
        dup_arcsec = DUP_ARCSEC
    pad_deg = dup_arcsec / 3600.0  # 1" ~ 0.00027778 deg

    # RA spans shrink by cos(dec); widen the RA pad accordingly.  Clamp the
    # cosine away from zero so the box stays finite right at the poles.
    cos_dec = max(math.cos(math.radians(dec_deg)), 1e-6)
    ra_pad = pad_deg / cos_dec
    ra_lo, ra_hi = ra_deg - ra_pad, ra_deg + ra_pad
    dec_lo, dec_hi = dec_deg - pad_deg, dec_deg + pad_deg

    # Split the RA interval into at most two in-range [0, 360] segments.
    if ra_lo < 0.0:
        ra_ranges = [(0.0, ra_hi), (ra_lo + 360.0, 360.0)]
    elif ra_hi > 360.0:
        ra_ranges = [(ra_lo, 360.0), (0.0, ra_hi - 360.0)]
    else:
        ra_ranges = [(ra_lo, ra_hi)]

    for seg_lo, seg_hi in ra_ranges:
        cur = conn.execute(
            "SELECT id, ra_deg, dec_deg FROM var_flag "
            "WHERE ra_deg BETWEEN ? AND ? AND dec_deg BETWEEN ? AND ?",
            (seg_lo, seg_hi, dec_lo, dec_hi)
        )
        for rid, rra, rdec in cur.fetchall():
            # Exact separation (small-angle formula, delta-RA wrapped to
            # [-180, 180] so the 0/360 boundary is handled correctly).
            dra = math.radians((rra - ra_deg + 180.0) % 360.0 - 180.0)
            ddec = math.radians(rdec - dec_deg)
            cd = math.cos(math.radians((dec_deg + rdec) * 0.5))
            sep_arcsec = math.degrees(math.hypot(ddec, cd * dra)) * 3600.0
            if sep_arcsec <= dup_arcsec:
                return int(rid)
    return None


def insert_or_update(conn: sqlite3.Connection, ra_deg: float, dec_deg: float,
                     is_variable: int, catalogs_hit: str, reason: str):
    """
    Persist a verdict under the 1" merge policy.

    If a row already exists within 1" of (ra_deg, dec_deg), that row stays
    canonical — no new row is added.  The only mutation allowed on an
    existing row is an "upgrade": when the new evidence says the source is
    variable, the stored record is promoted to is_variable=1 (the safer
    interpretation if the old value was 0).
    """
    match_id = find_existing_within_1arcsec(conn, ra_deg, dec_deg)
    if match_id is not None:
        if not is_variable:
            # Existing non-conflicting record: keep it untouched.
            return
        # Upgrade the stored record with the new variability evidence.
        conn.execute(
            "UPDATE var_flag SET is_variable=1, catalogs_hit=?, reason=?, checked_at=datetime('now') WHERE id=?",
            (catalogs_hit, reason, match_id)
        )
    else:
        conn.execute(
            "INSERT INTO var_flag (ra_deg, dec_deg, is_variable, catalogs_hit, reason, checked_at) "
            "VALUES (?,?,?,?,?, datetime('now'))",
            (ra_deg, dec_deg, is_variable, catalogs_hit, reason)
        )
    conn.commit()


# ========== ClickHouse 抽取 ==========
def fetch_positions_batch(client: Client, offset: int, limit: int) -> pd.DataFrame:
    """
    Fetch one page of approximately de-duplicated coordinates from star_catalog.

    - alpha_j2000 / delta_j2000 are rounded to 1e-6 deg (~0.0036") and the
      rounded pairs are GROUP BY'ed, which collapses near-identical detections;
    - the result is ORDER BY'ed so LIMIT/OFFSET paging is deterministic.

    Parameters
    ----------
    client : connected clickhouse_driver Client.
    offset, limit : paging window over the grouped result set.

    Returns
    -------
    DataFrame with columns ['ra_deg', 'dec_deg'] (empty when the page is
    past the end of the data).

    Fix vs. previous version: the SQL actually performs the GROUP BY and
    ORDER BY the docstring promised.  Without GROUP BY nothing was
    de-duplicated, and LIMIT/OFFSET without ORDER BY is non-deterministic in
    ClickHouse, so pages could repeat or skip rows between calls.
    """
    sql = f"""
    SELECT
        round(alpha_j2000, 6) AS ra_deg,    -- 1e-6 deg ≈ 0.0036"
        round(delta_j2000, 6) AS dec_deg
    FROM {OBS_TABLE}
    GROUP BY ra_deg, dec_deg
    ORDER BY ra_deg, dec_deg
    LIMIT %(limit)s OFFSET %(offset)s
    """
    # execute(..., with_column_types=True) returns (rows, column_types);
    # index [0] keeps only the row tuples.
    rows = client.execute(sql, params={'limit': limit, 'offset': offset}, with_column_types=True)[0]
    if not rows:
        return pd.DataFrame(columns=['ra_deg', 'dec_deg'])
    return pd.DataFrame(rows, columns=['ra_deg', 'dec_deg'])



# ========== 主流程 ==========
def is_variable_by_vizier(agg: 'VariableCatalogAggregator', ra_deg: float, dec_deg: float,
                          search_radius_arcsec: float = VIZIER_RADIUS_ARCSEC,
                          match_arcsec: float = DUP_ARCSEC) -> Tuple[int, str, str]:
    """
    Decide via VizieR whether the position is a known variable source.

    Two-stage match: query the catalogs with a generous radius first
    (default 5"), then keep only the entries within ``match_arcsec``
    (default 1").  Any surviving entry means "variable".

    Returns
    -------
    (is_variable, catalogs_hit, reason) where catalogs_hit is a
    comma-separated list of the matched catalog names and reason describes
    the closest matched entry.
    """
    hits = agg.query_all(ra_deg, dec_deg, radius_arcsec=search_radius_arcsec)
    if hits.empty:
        return 0, "", "no hit in catalogs"

    # Refinement stage: exact separations via astropy, keep <= match_arcsec.
    target = SkyCoord(ra_deg * u.deg, dec_deg * u.deg)
    catalog_coords = SkyCoord(hits['ra'].values * u.deg, hits['dec'].values * u.deg)
    separations = target.separation(catalog_coords).arcsec
    close_enough = separations <= match_arcsec
    if not np.any(close_enough):
        return 0, "", "no ≤1\" match"

    matched = hits.loc[close_enough].copy()
    matched['sep_arcsec'] = separations[close_enough]
    # Comma-separated, sorted list of catalogs that produced a match.
    catalogs_hit = ",".join(sorted(matched['catalog'].astype(str).unique().tolist()))
    # Describe the single closest entry in the human-readable reason.
    best = matched.iloc[int(np.argmin(matched['sep_arcsec'].values))]
    reason = f"≤1\" match: {best['catalog']} ({best['name_or_id']}) @ {best['sep_arcsec']:.2f}\""
    return 1, catalogs_hit, reason


def run():
    """
    End-to-end driver: page positions out of ClickHouse, skip sources that
    already have a verdict within 1", query the VizieR catalogs for the
    rest, and persist each verdict into SQLite.

    Fix vs. previous version: the SQLite connection was only closed on the
    success path (leaking the handle on any exception), and the ClickHouse
    client was never disconnected; both are now released in a finally block.
    """
    # 1) Connections.
    client = Client(
        host=CLICKHOUSE_HOST, port=CLICKHOUSE_PORT,
        user=CLICKHOUSE_USER, password=CLICKHOUSE_PASS,
        settings={'use_numpy': True}
    )
    conn = sqlite3.connect(SQLITE_PATH)
    try:
        ensure_sqlite(conn)

        # 2) Aggregator over the individual VizieR variable-star catalogs.
        agg = VariableCatalogAggregator(row_limit=10000)

        # 3) Simple LIMIT/OFFSET paging; for very large tables switch to
        #    cursor-style paging or a HEALPix scan.
        offset = 0
        total_insert = 0
        while True:
            df = fetch_positions_batch(client, offset=offset, limit=BATCH)
            if df.empty:
                print("全部处理完成。")
                break

            for ra_deg, dec_deg in df[['ra_deg', 'dec_deg']].itertuples(index=False, name=None):
                # Local 1" dedup: positions that already have a verdict are
                # skipped here; the "upgrade to variable" path only applies
                # inside insert_or_update for freshly-queried positions.
                if find_existing_within_1arcsec(conn, ra_deg, dec_deg) is not None:
                    continue

                is_var, cats, reason = is_variable_by_vizier(
                    agg, ra_deg, dec_deg,
                    search_radius_arcsec=VIZIER_RADIUS_ARCSEC,
                    match_arcsec=DUP_ARCSEC
                )
                insert_or_update(conn, ra_deg, dec_deg, is_var, cats, reason)
                total_insert += 1

            print(f"批次完成：offset={offset}, 本批 {len(df)} 条，累计新写入 {total_insert} 条。")
            offset += BATCH
    finally:
        # Release resources even when a batch raises mid-run.
        conn.close()
        client.disconnect()


# Script entry point: run the full ClickHouse -> VizieR -> SQLite pipeline.
if __name__ == "__main__":
    run()
