#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
并行批量：FITS 星表 -> 并发查询光变曲线（LightCurveFetcher）-> 主线程弹窗画图(可选) -> 保存 PNG(可选) -> 汇总 parquet

特点
- 线程池并行 requests（I/O 密集），显著提速
- 画图严格在主线程执行，避免 matplotlib 线程安全问题
- 支持 skip/limit、失败不中断、逐目标保存 PNG、最终汇总 parquet

依赖：
- loguru, pandas, matplotlib, astropy, tqdm
- 你已有的 main_light_curve.py 中的 LightCurveFetcher / plot_light_curve / plot_multiband
"""

import os
import time
from typing import List, Dict, Optional, Tuple

from loguru import logger
import pandas as pd
from matplotlib import pyplot as plt
from astropy.table import Table
from astropy.coordinates import SkyCoord
import astropy.units as u
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor, as_completed

# 你已有的实现（保持不变）
from main_light_curve import LightCurveFetcher, plot_light_curve, plot_multiband

from astropy.io import fits

def get_fits_nrows(fits_path: str, ext: int = 1) -> int:
    """
    Return the number of rows of a FITS table extension without loading
    the whole table into memory.

    :param fits_path: path to the FITS file
    :param ext: HDU index of the table (binary tables usually live in
                extension 1; adjust to match your file layout)
    :return: row count of the table
    """
    with fits.open(fits_path, memmap=True) as hdul:
        hdu = hdul[ext]
        # Cheapest path first: for table HDUs, NAXIS2 in the header *is*
        # the row count, and reading it never touches the data section.
        # (The old order accessed hdu.data first, which materializes the
        # memmap and defeats the "header-only" intent of this function.)
        if hasattr(hdu, "header") and "NAXIS2" in hdu.header:
            return int(hdu.header["NAXIS2"])
        # Fallback: length of the first axis of the (memmapped) data.
        if hasattr(hdu, "data") and hdu.data is not None:
            return hdu.data.shape[0]
    # Last resort: read the table once via astropy Table (loads into
    # memory, but this path should only run for unusual file layouts).
    return len(Table.read(fits_path, format="fits"))

# ========================= FITS 读取 & 工具 ========================= #

def load_targets_from_fits(
    fits_path: str,
    name_col: str = "Name",
    ra_col: str = "RAdeg",
    dec_col: str = "DEdeg",
    limit: Optional[int] = None,
    skip: int = 0,
) -> List[Dict]:
    """
    Read a FITS catalog and return a list of target dicts:
    ``[{name, ra (hms string), dec (dms string), ra_deg, dec_deg}, ...]``.

    :param fits_path: path to the FITS catalog
    :param name_col: column holding the target name; missing column falls
                     back to ``row_<absolute index>``
    :param ra_col: column holding RA in degrees
    :param dec_col: column holding Dec in degrees
    :param limit: take at most this many rows (None = until end of file)
    :param skip: skip the first N rows (processing starts at row ``skip``)
    :raises ValueError: if ``skip`` is beyond the end of the table
    """
    tab = Table.read(fits_path, format="fits")
    total_len = len(tab)

    if skip >= total_len:
        raise ValueError(f"skip={skip} 已超过星表总行数 {total_len}")

    start_idx = skip
    end_idx = total_len if limit is None else min(total_len, skip + limit)

    # Convert the whole page in ONE vectorized SkyCoord call instead of
    # constructing one SkyCoord per row — identical output strings, but
    # far faster for large pages.
    ra_deg_list = [float(v) for v in tab[ra_col][start_idx:end_idx]]
    dec_deg_list = [float(v) for v in tab[dec_col][start_idx:end_idx]]
    coords = SkyCoord(ra_deg_list * u.deg, dec_deg_list * u.deg, frame="icrs")
    ra_hms_list = coords.ra.to_string(unit=u.hour, sep=":", precision=6, pad=True)
    dec_dms_list = coords.dec.to_string(unit=u.deg, sep=":", precision=6, pad=True, alwayssign=True)

    have_name = name_col in tab.colnames
    rows = []
    for off, i in enumerate(range(start_idx, end_idx)):
        name = str(tab[name_col][i]) if have_name else f"row_{i}"
        rows.append({
            "name": name,
            "ra": str(ra_hms_list[off]),
            "dec": str(dec_dms_list[off]),
            "ra_deg": ra_deg_list[off],
            "dec_deg": dec_deg_list[off],
        })

    logger.info(f"从第 {start_idx} 行开始，读取到第 {end_idx-1} 行，共 {len(rows)} 个目标。")
    return rows


def _ensure_errcol(df: pd.DataFrame) -> pd.DataFrame:
    """
    Guarantee the presence of the error column ``magerr_auto_s``.

    If the column already exists, the frame is returned unchanged. Otherwise
    the first known alternative error column is renamed to it; if no error
    column exists at all, a copy is returned with the column filled with NA.
    """
    if "magerr_auto_s" in df.columns:
        return df
    fallback = next(
        (c for c in ("magerr", "mag_err", "MAGERR", "magerr_auto") if c in df.columns),
        None,
    )
    if fallback is not None:
        return df.rename(columns={fallback: "magerr_auto_s"})
    out = df.copy()
    out["magerr_auto_s"] = pd.NA
    return out


# ========================= 单目标抓取（供并发调用） ========================= #

def _fetch_one(
    base_url: str,
    target: Dict,
    radius: float,
    band: str,
    start_date: Optional[str],
) -> Tuple[Dict, Optional[pd.DataFrame], Optional[Exception]]:
    """
    Thread-pool worker: fetch one target's light curve.

    Never raises — returns a ``(target, dataframe_or_None, exception_or_None)``
    triple so the caller can tell "no data" apart from "request failed".
    """
    try:
        # A fresh fetcher per call: lightweight, and keeps threads isolated.
        fetcher = LightCurveFetcher(base_url)
        df = fetcher.fetch(
            target["ra"],
            target["dec"],
            radius=radius,
            band=band,
            start_date=start_date,
            download=False,
        )
        if not isinstance(df, pd.DataFrame) or df.empty:
            return target, None, None
        # Tag every row with the target's identity so the concatenated
        # summary can later be grouped back per target.
        out = _ensure_errcol(df).copy()
        out["target_name"] = target["name"]
        out["RA"] = target["ra"]
        out["Dec"] = target["dec"]
        out["RAdeg"] = target["ra_deg"]
        out["DEdeg"] = target["dec_deg"]
        return target, out, None
    except Exception as e:
        return target, None, e


# ========================= 并行批量主流程 ========================= #

def parallel_batch_query_from_fits(
    fits_path: str,
    base_url: str,
    radius: float = 1.5,
    band: str = "all",
    start_date: Optional[str] = None,
    save_dir: str = "./lightcurves",
    limit: Optional[int] = None,
    skip: int = 0,
    pause_s: float = 0.0,      # usually unnecessary in concurrent mode
    show_plot: bool = True,
    save_plot: bool = True,
    max_workers: int = 8,      # concurrent request count (tune to backend capacity)
) -> pd.DataFrame:
    """
    Concurrent batch query over one page of the catalog.

    - a ThreadPoolExecutor issues the (I/O-bound) network requests in parallel
    - the main thread consumes results as they complete: plotting (matplotlib
      is not thread-safe, so plots stay on the main thread), optional PNG
      saving, and accumulation into one summary DataFrame

    :return: concatenated DataFrame of all fetched light curves, also written
             to ``all_targets_lightcurves.parquet``; empty DataFrame if no
             target returned data
    """
    os.makedirs(save_dir, exist_ok=True)
    figdir = os.path.join(save_dir, "figs")
    os.makedirs(figdir, exist_ok=True)

    if show_plot:
        plt.ion()

    targets = load_targets_from_fits(fits_path, limit=limit, skip=skip)
    all_rows: List[pd.DataFrame] = []

    with ThreadPoolExecutor(max_workers=max_workers) as ex:
        # Submit everything up front; handle results in completion order.
        future_map = {
            ex.submit(_fetch_one, base_url, t, radius, band, start_date): t
            for t in targets
        }

        with tqdm(total=len(future_map), desc="Parallel fetching") as pbar:
            for fut in as_completed(future_map):
                t = future_map[fut]
                try:
                    target, df, err = fut.result()
                except Exception as e:
                    # _fetch_one is designed not to raise, but guard anyway.
                    target, df, err = t, None, e

                # Main-thread handling: plot + save + accumulate.
                if err is not None:
                    logger.error(f"[FAIL] {t['name']} ({t['ra']}, {t['dec']}): {err}")
                elif df is None or df.empty:
                    pass  # no data for this target; stay quiet to keep logs readable
                else:
                    # Plotting happens on the main thread only (thread safety).
                    title = f"{t['name']}  (RA={t['ra']}, Dec={t['dec']})"
                    try:
                        if band == "all" and ("band" in df.columns):
                            plot_multiband(df, title=title)
                        else:
                            plot_light_curve(df, title=title)

                        if save_plot:
                            safe_name = t["name"].replace(" ", "_").replace("/", "_")
                            plt.gcf().savefig(os.path.join(figdir, f"{safe_name}.png"), dpi=130)
                        if show_plot:
                            plt.pause(0.001)
                    except Exception as plot_err:
                        logger.error(f"[PLOT] {t['name']} 画图失败：{plot_err}")
                    finally:
                        # BUGFIX: close the figure unconditionally. Previously a
                        # plot/save error skipped plt.close(), leaking one figure
                        # per failure and growing memory over large catalogs.
                        plt.close()

                    all_rows.append(df)

                pbar.update(1)
                if pause_s > 0:
                    time.sleep(pause_s)

    # Write the page summary.
    if all_rows:
        df_all = pd.concat(all_rows, ignore_index=True)
        out_parq = os.path.join(save_dir, "all_targets_lightcurves.parquet")
        df_all.to_parquet(out_parq, index=False)
        logger.info(f"✅ 汇总保存：{out_parq}，总记录 {len(df_all)}")
        return df_all
    else:
        logger.warning("⚠️ 没有获得任何记录")
        return pd.DataFrame()


def run_paged_over_fits(
    fits_path: str,
    base_url: str,
    radius: float = 1.5,
    band: str = "all",
    start_date: Optional[str] = None,
    save_dir: str = "./lightcurves",
    page_size: int = 1000,
    start_skip: int = 0,
    show_plot: bool = False,     # pop-up plots are best disabled for large runs
    save_plot: bool = True,
    max_workers: int = 8,
    merge_all_at_end: bool = False,   # merge one final parquet (memory-heavy; keep False for big runs)
):
    """
    Drive ``parallel_batch_query_from_fits`` over the whole catalog in pages.

    Each page of ``page_size`` rows is fetched concurrently and written to its
    own parquet file (``batch_<p>.parquet``) so memory stays bounded; the
    per-page files can optionally be merged into one parquet at the end.

    :param start_skip: resume support — processing starts exactly at this row.
    """
    os.makedirs(save_dir, exist_ok=True)
    total = get_fits_nrows(fits_path)
    if start_skip >= total:
        logger.warning(f"start_skip={start_skip} 已超过总行数 {total}，无需处理。")
        return

    # BUGFIX: pages now start at start_skip + k*page_size. The old code
    # computed skip = page_index * page_size, which re-processed rows
    # *before* start_skip whenever start_skip was not page-aligned,
    # breaking the documented "resume from last stop" behavior.
    page_starts = range(start_skip, total, page_size)
    num_pages = len(page_starts)
    # Page label base: matches the old numbering whenever start_skip is a
    # multiple of page_size, so existing batch_XXXX.parquet names line up.
    first_page = start_skip // page_size

    logger.info(f"总行数：{total}，从第 {start_skip} 行开始，page_size={page_size}，总页数≈{num_pages}")

    batch_parqs = []
    processed = 0

    for offset, skip in enumerate(page_starts):
        p = first_page + offset
        limit = min(page_size, total - skip)
        if limit <= 0:
            break

        logger.info(f"==== Page {p} | skip={skip}, limit={limit} ====")

        # Delegate one page to the existing single-page routine.
        df_page = parallel_batch_query_from_fits(
            fits_path=fits_path,
            base_url=base_url,
            radius=radius,
            band=band,
            start_date=start_date,
            save_dir=save_dir,
            limit=limit,
            skip=skip,
            pause_s=0.0,
            show_plot=show_plot,
            save_plot=save_plot,
            max_workers=max_workers,
        )

        # Persist each page separately so results never accumulate in memory.
        if df_page is not None and not df_page.empty:
            parq_path = os.path.join(save_dir, f"batch_{p:04d}.parquet")
            df_page.to_parquet(parq_path, index=False)
            batch_parqs.append(parq_path)
            logger.info(f"[PAGE DONE] 保存 {parq_path}，记录 {len(df_page)}")
            processed += len(df_page)
        else:
            logger.warning(f"[PAGE EMPTY] 第 {p} 页无记录")

    logger.info(f"✅ 全部分页完成，共处理记录数：{processed}")

    # Optional: merge all page files into one parquet. May use a lot of
    # memory for large datasets — enable deliberately.
    if merge_all_at_end and batch_parqs:
        logger.info(f"开始合并 {len(batch_parqs)} 个分页 parquet 为 all_targets_lightcurves.parquet ...")
        dfs = [pd.read_parquet(pq) for pq in batch_parqs]
        df_all = pd.concat(dfs, ignore_index=True)
        out_parq = os.path.join(save_dir, "all_targets_lightcurves.parquet")
        df_all.to_parquet(out_parq, index=False)
        logger.info(f"✅ 合并完成：{out_parq}，总记录 {len(df_all)}")

# ========================= 示例入口 ========================= #

if __name__ == "__main__":
    BASE_URL   = "http://192.168.16.70:8082/api/query_light_curve_xp_version_V20250303_KM"
    RADIUS     = 2
    BAND       = "all"
    START_DATE = None

    FITS_PATH   = "AllVariable.fits"
    SAVE_DIR    = "./lightcurves"
    MAX_WORKERS = 8

    # 分页参数
    PAGE_SIZE   = 2000     # 每页处理多少行
    START_SKIP  = 0        # 支持断点续跑：改成上次停下的行号即可

    run_paged_over_fits(
        fits_path=FITS_PATH,
        base_url=BASE_URL,
        radius=RADIUS,
        band=BAND,
        start_date=START_DATE,
        save_dir=SAVE_DIR,
        page_size=PAGE_SIZE,
        start_skip=START_SKIP,
        show_plot=False,          # 建议大批量关闭弹窗
        save_plot=True,
        max_workers=MAX_WORKERS,
        merge_all_at_end=False,   # 如需最终合并成一个大 parquet，可改 True（注意内存）
    )
