#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse, warnings, re, os
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
from pandas.tseries.offsets import MonthEnd

TRAIN_YEARS = list(range(2017, 2023))
FORE_YEARS  = [2025, 2026]

# ----------------- 基本工具 -----------------
def make_4day_grid_for_year(year: int) -> pd.DatetimeIndex:
    """Return the 4-day sampling grid for one year: Mar 1 up to Jul 1 (inclusive bound).

    The grid starts on March 1 and advances in 4-day steps; since the Mar 1 - Jul 1
    window is 122 days, the last grid point is June 29 for every year.
    """
    # pd.date_range with freq="4D" reproduces the manual while-loop exactly:
    # it starts at `start` and keeps every point <= `end`.
    return pd.date_range(
        start=pd.Timestamp(year=year, month=3, day=1),
        end=pd.Timestamp(year=year, month=7, day=1),
        freq="4D",
    )

def build_future_grid():
    """Combine the per-year 4-day grids of all forecast years into one sorted DatetimeIndex."""
    all_dates = []
    for yr in FORE_YEARS:
        all_dates.extend(make_4day_grid_for_year(yr))
    return pd.DatetimeIndex(sorted(all_dates))

def ensure_percent_scale(s: pd.Series) -> pd.Series:
    """Coerce to numeric, rescale fraction-valued series (max <= 1) to percent, clamp to [0, 100]."""
    vals = pd.to_numeric(s, errors="coerce")
    looks_fractional = vals.max(skipna=True) <= 1.0
    if looks_fractional:
        vals = 100.0 * vals
    return vals.clip(lower=0, upper=100)

# ----------------- 训练表 -----------------
def load_train_table(path, target_col, cols=None):
    """Read the training CSV and return (train_df, regressor_cols).

    train_df has columns ds (datetime), y (target, forced to a 0-100 percent
    scale) plus the regressor columns, restricted to the TRAIN_YEARS range.
    When `cols` is None, every numeric column except the target is used.
    Raises ValueError when no time column or the target column is missing.
    """
    df = pd.read_csv(path)
    date_col = None
    for cand in ["date", "时间", "日期", "ds", "time"]:
        if cand in df.columns:
            date_col = cand
            break
    if date_col is None:
        raise ValueError("训练表未找到时间列（date/时间/日期/ds/time）")
    df["date"] = pd.to_datetime(df[date_col])
    year = df["date"].dt.year
    df = df[(year >= min(TRAIN_YEARS)) & (year <= max(TRAIN_YEARS))].copy()
    if target_col not in df.columns:
        raise ValueError(f"找不到目标列 {target_col}")
    df[target_col] = ensure_percent_scale(df[target_col])
    if cols is None:
        numeric = df.select_dtypes(include=[np.number]).columns
        cols = [c for c in numeric if c != target_col]
    train = (
        df[["date", target_col] + cols]
        .copy()
        .rename(columns={"date": "ds", target_col: "y"})
    )
    return train, cols

# ----------------- all_vars 时间解析 -----------------
def _try_parse_quarter_str(s: pd.Series) -> pd.Series:
    def _to_dt(x):
        if pd.isna(x): return pd.NaT
        xs = str(x).strip()
        zh_map = {"一":"1","二":"2","三":"3","四":"4","Ⅰ":"1","Ⅱ":"2","Ⅲ":"3","Ⅳ":"4"}
        for k,v in zh_map.items(): xs = xs.replace(k, v)
        m = re.match(r"^\s*(\d{4})\s*[-/ ]?\s*[Q第]?\s*([1-4])\s*[季度Q]?\s*$", xs)
        if m:
            y = int(m.group(1)); q = int(m.group(2))
            return pd.Timestamp(year=y, month=q*3, day=1) + MonthEnd(0)
        return pd.NaT
    return s.map(_to_dt)

def _try_parse_yearmonth(s: pd.Series) -> pd.Series:
    def _to_dt(x):
        if pd.isna(x): return pd.NaT
        xs = str(x).strip()
        if re.match(r"^\d{6}$", xs):
            dt = pd.to_datetime(xs, format="%Y%m", errors="coerce")
            return dt + MonthEnd(0) if pd.notna(dt) else pd.NaT
        if re.match(r"^\d{4}[-/.]\d{1,2}$", xs):
            dt = pd.to_datetime(xs, errors="coerce")
            return (dt + MonthEnd(0)) if pd.notna(dt) else pd.NaT
        return pd.NaT
    return s.map(_to_dt)

def _coerce_datetime(df: pd.DataFrame, date_col: str) -> pd.DatetimeIndex:
    ser = df[date_col]
    dt = pd.to_datetime(ser, errors="coerce", infer_datetime_format=True)
    if dt.notna().mean() > 0.8: return pd.DatetimeIndex(dt)
    dt = _try_parse_quarter_str(ser)
    if dt.notna().mean() > 0.8: return pd.DatetimeIndex(dt)
    dt = _try_parse_yearmonth(ser)
    if dt.notna().mean() > 0.8: return pd.DatetimeIndex(dt)
    return pd.DatetimeIndex([pd.NaT]*len(ser))

def guess_and_parse_datetime(df: pd.DataFrame) -> pd.DatetimeIndex:
    """Locate and parse a time axis in `df`.

    Tries, in order: known time-column names, a year+quarter(/season) pair,
    a year+month pair (both anchored to month end), and finally the first
    column. Raises ValueError when nothing parses.
    """
    for name in ["date", "时间", "日期", "ds", "time"]:
        if name in df.columns:
            parsed = _coerce_datetime(df, name)
            if parsed.notna().any():
                return parsed

    lowered = {c.lower(): c for c in df.columns}
    if "year" in lowered and ("quarter" in lowered or "season" in lowered):
        years = pd.to_numeric(df[lowered["year"]], errors="coerce")
        quarter_col = lowered["quarter"] if "quarter" in lowered else lowered["season"]
        quarters = pd.to_numeric(df[quarter_col], errors="coerce").clip(1, 4)
        # Quarter q ends in month 3q; anchor to that month's last day.
        assembled = pd.to_datetime(
            dict(year=years, month=(quarters * 3).astype("Int64"), day=1),
            errors="coerce",
        ) + MonthEnd(0)
        return pd.DatetimeIndex(assembled)
    if "year" in lowered and "month" in lowered:
        years = pd.to_numeric(df[lowered["year"]], errors="coerce")
        months = pd.to_numeric(df[lowered["month"]], errors="coerce").clip(1, 12)
        assembled = pd.to_datetime(
            dict(year=years, month=months, day=1), errors="coerce"
        ) + MonthEnd(0)
        return pd.DatetimeIndex(assembled)

    parsed = _coerce_datetime(df, df.columns[0])
    if parsed.notna().any():
        return parsed
    raise ValueError("无法识别时间列；可用列名：" + ", ".join(map(str, df.columns)))

# ----------------- 因子外推（2025–2026 的 4天栅格） -----------------
def forecast_quarterly_const(series_quarter: pd.Series, fut_grid: pd.DatetimeIndex) -> pd.Series:
    """Forecast a quarterly factor and broadcast it piecewise-constant onto `fut_grid`.

    The input is aggregated to quarterly means, forecast at the Q1/Q2 quarter-end
    dates of each FORE_YEARS year (Prophet when available, otherwise a linear trend
    fitted on nanosecond timestamps), and every grid date then receives the value
    of its own quarter's end date — so the factor stays constant within a quarter.
    Returns a Series indexed by `fut_grid`.
    """
    # Quarterly means; "Q" = calendar quarter end (deprecated alias for "QE" in pandas >= 2.2).
    s = series_quarter.copy().sort_index().resample("Q").mean().dropna()
    try:
        from prophet import Prophet
        dfp = pd.DataFrame({"ds": s.index, "y": s.values})
        m = Prophet(weekly_seasonality=False, daily_seasonality=False, yearly_seasonality=True)
        m.fit(dfp)
        # Only Q1 (Mar 31) and Q2 (Jun 30) are needed: the grid spans Mar 1 - Jul 1.
        q_targets = []
        for y in FORE_YEARS:
            q_targets += [pd.Timestamp(f"{y}-03-31"), pd.Timestamp(f"{y}-06-30")]
        futq = pd.DataFrame({"ds": pd.to_datetime(q_targets)})
        yhat = m.predict(futq)[["ds","yhat"]].set_index("ds")["yhat"]
    except Exception:
        # Fallback when Prophet is unavailable or fails: least-squares line on the
        # integer (ns since epoch) representation of the quarterly timestamps.
        idx = s.index.view("i8").astype(float)
        vals = s.values.astype(float)
        k, b = np.polyfit(idx, vals, 1)
        q_targets = []
        for y in FORE_YEARS:
            q_targets += [pd.Timestamp(f"{y}-03-31"), pd.Timestamp(f"{y}-06-30")]
        idx2 = pd.to_datetime(q_targets).view("i8").astype(float)
        yhat = pd.Series(k*idx2 + b, index=pd.to_datetime(q_targets))

    # Map each grid date to the end of its own quarter.
    def quarter_end(ts):
        q = (ts.month-1)//3 + 1
        return pd.Timestamp(year=ts.year, month=q*3, day=1) + MonthEnd(0)
    qe = pd.Series([quarter_end(d) for d in fut_grid], index=fut_grid)
    # Union with the forecast index so ffill/bfill can cover any quarter-end the
    # model did not predict directly, then relabel by the original grid dates.
    yhat_full = yhat.reindex(yhat.index.union(qe.unique())).sort_index().ffill().bfill()
    return yhat_full.reindex(qe.values).set_axis(fut_grid)

def forecast_continuous(series_daily: pd.Series, fut_grid: pd.DatetimeIndex, cp_scale=0.04) -> pd.Series:
    """Forecast a continuous daily factor onto `fut_grid`.

    The series is regularized to daily frequency (time interpolation plus edge
    fill), restricted to the TRAIN_YEARS window, and extrapolated with Prophet
    when available; otherwise a least-squares fit of linear trend plus one
    annual sine/cosine harmonic is used. Returns a Series indexed by `fut_grid`.
    """
    daily = pd.to_numeric(series_daily, errors="coerce").asfreq("D").interpolate("time").ffill().bfill()
    years = daily.index.year
    train = daily[(years >= min(TRAIN_YEARS)) & (years <= max(TRAIN_YEARS))]
    try:
        from prophet import Prophet
        model = Prophet(weekly_seasonality=False, daily_seasonality=False,
                        yearly_seasonality=True, changepoint_prior_scale=cp_scale)
        model.fit(pd.DataFrame({"ds": train.index, "y": train.values}))
        forecast = model.predict(pd.DataFrame({"ds": fut_grid}))
        return forecast[["ds", "yhat"]].set_index("ds")["yhat"]
    except Exception:
        # Fallback: OLS fit of [1, t, sin(wt), cos(wt)] with a one-year period.
        origin = train.index[0]
        t_fit = (train.index - origin).days.values.astype(float)
        omega = 2*np.pi/365.25
        design = np.c_[np.ones_like(t_fit), t_fit, np.sin(omega*t_fit), np.cos(omega*t_fit)]
        coef = np.linalg.pinv(design) @ train.values.astype(float)
        t_out = (fut_grid - origin).days.values.astype(float)
        design_out = np.c_[np.ones_like(t_out), t_out, np.sin(omega*t_out), np.cos(omega*t_out)]
        return pd.Series(design_out @ coef, index=fut_grid, dtype=float)

def build_future_regressors(allvars_csv, cols, grad_col, fut_grid, cp_scale=0.04):
    """Extrapolate each regressor in `cols` onto the future 4-day grid.

    Reads the all-variables CSV, auto-detects its time axis, and forecasts each
    column: `grad_col` (graduate headcount) is treated as a quarterly constant
    via forecast_quarterly_const; every other column is daily-interpolated and
    forecast via forecast_continuous. Returns a DataFrame with a `date` column
    plus one column per regressor. Raises ValueError when a requested column is
    missing or no numeric columns exist.
    """
    raw = pd.read_csv(allvars_csv)
    dt = guess_and_parse_datetime(raw)
    raw = raw.copy()
    raw["date"] = dt
    raw = raw.dropna(subset=["date"]).sort_values("date").set_index("date")
    num = raw.select_dtypes(include=[np.number]).copy()
    if num.empty: raise ValueError("all_vars_yoy.csv 中没有数值列。")
    # Daily regularization used for all continuous factors.
    daily_all = num.resample("D").mean().interpolate("time").ffill().bfill()

    fac = pd.DataFrame(index=fut_grid)
    for col in cols:
        # NOTE(review): daily resampling preserves the numeric columns, so the
        # second half of this condition looks redundant with the first — kept as-is.
        if col not in daily_all.columns and not (grad_col and col == grad_col and col in num.columns):
            raise ValueError(f"列 `{col}` 不在 all_vars_yoy.csv 的数值列中。")
        if grad_col and col == grad_col:
            # Quarterly series must NOT be interpolated; use the raw values.
            fac[col] = forecast_quarterly_const(num[col], fut_grid)
        else:
            fac[col] = forecast_continuous(daily_all[col], fut_grid, cp_scale=cp_scale)
    return fac.reset_index().rename(columns={"index":"date"})

# ----------------- 自适应 cap/floor（日历型） -----------------
def build_cap_floor_calendar(train_df, fut_idx, cap_pct=0.90, cap_margin=6.0,
                             floor_pct=0.10, floor_margin=2.0, hard_cap=96.0):
    """Derive calendar-day (MM-DD keyed) logistic cap/floor bands from history.

    Per calendar day: cap = quantile(cap_pct) + cap_margin (limited to hard_cap)
    and floor = quantile(floor_pct) - floor_margin (limited to 0). A minimum gap
    of 12 points between the bands is enforced, both bands are smoothed with a
    centered 3-day rolling mean, and the result is mapped back onto the training
    dates and `fut_idx`. Returns (train_capfloor, future_capfloor), each a
    DataFrame with columns ds / cap / floor.
    """
    history = train_df[["ds", "y"]].copy()
    history["mmdd"] = history["ds"].dt.strftime("%m-%d")
    by_day = history.groupby("mmdd")["y"]
    bands = pd.DataFrame({
        "cap": (by_day.quantile(cap_pct) + cap_margin).clip(upper=hard_cap),
        "floor": (by_day.quantile(floor_pct) - floor_margin).clip(lower=0),
    })

    # Keep cap at least `safety_gap` above floor so logistic growth has headroom.
    safety_gap = 12.0
    too_tight = bands["cap"] - bands["floor"] < safety_gap
    bands.loc[too_tight, "cap"] = (bands.loc[too_tight, "floor"] + safety_gap).clip(upper=hard_cap)

    # Smooth each band across neighbouring calendar days.
    for band in ("cap", "floor"):
        bands[band] = bands[band].rolling(3, min_periods=1, center=True).mean()

    def attach(dates):
        # Map the MM-DD keyed bands onto concrete dates.
        frame = pd.DataFrame({"ds": dates})
        keys = frame["ds"].dt.strftime("%m-%d")
        frame["cap"] = keys.map(bands["cap"]).astype(float)
        frame["floor"] = keys.map(bands["floor"]).astype(float)
        return frame

    return attach(train_df["ds"]), attach(fut_idx)

# ----------------- Prophet 多元（Logistic + 自适应 cap/floor） -----------------
def prophet_with_regressors_logistic(train_df: pd.DataFrame, future_df: pd.DataFrame,
                                     reg_cols, cp_scale=0.04, reg_scale=0.2) -> pd.DataFrame:
    """Fit a logistic-growth Prophet with external regressors and predict on `future_df`.

    `train_df` must carry ds/y/cap/floor plus every column in `reg_cols`;
    `future_df` carries ds/cap/floor plus the same regressors (NaNs are
    interpolated/edge-filled). Predictions are clipped into each row's
    [floor, cap] band and then into [0, 100]. Returns a DataFrame with the
    columns date / 就业率（%）.
    """
    from prophet import Prophet

    # Drop rows where the target, bounds, or any regressor is missing.
    n_before = len(train_df)
    train_df = train_df.dropna(subset=["y","cap","floor"] + reg_cols)
    n_dropped = n_before - len(train_df)
    if n_dropped > 0:
        print(f"[INFO] dropped {n_dropped} NaN rows for training")

    model = Prophet(
        growth="logistic",
        yearly_seasonality=True,
        weekly_seasonality=False,
        daily_seasonality=False,
        changepoint_prior_scale=cp_scale,
        seasonality_mode="additive",
        interval_width=0.8
    )
    for col in reg_cols:
        model.add_regressor(col, standardize=True, prior_scale=reg_scale)
    model.fit(train_df[["ds","y","cap","floor"] + reg_cols])

    future = future_df.copy()
    for col in reg_cols:
        if future[col].isna().any():
            future[col] = future[col].interpolate().ffill().bfill()

    pred = model.predict(future[["ds","cap","floor"] + reg_cols])[["ds","yhat"]]
    # Row-wise band clip first, then the global percent clamp.
    pred["yhat"] = pred["yhat"].clip(lower=future["floor"].values, upper=future["cap"].values)
    pred["yhat"] = pred["yhat"].clip(0, 100)
    return pred.rename(columns={"ds":"date","yhat":"就业率（%）"})

# ----------------- 主程序 -----------------
def main():
    """CLI entry point: train on 2017-2022 data, forecast the 2025-2026 4-day grid, write a CSV."""
    ap = argparse.ArgumentParser(description="Prophet 多元（日期自适应 cap/floor；3/1~7/1 每4天；2017-2022 训，2025-2026 预测）")
    ap.add_argument("--train", required=True, help="训练表 CSV（建议用 pre_2016_2022_grid.csv）")
    ap.add_argument("--allvars", required=True, help="外生变量全量 CSV（用于因子外推）")
    ap.add_argument("--target-col", default="employment_rate")
    ap.add_argument("--grad-col", required=True, help="毕业生人数列名（季度常数，不插值）")
    ap.add_argument("--cols", nargs="*", default=None, help="参与建模/外推的因子列（至少4个）")
    ap.add_argument("--cap-pct", type=float, default=0.90)
    ap.add_argument("--cap-margin", type=float, default=6.0)
    ap.add_argument("--floor-pct", type=float, default=0.10)
    ap.add_argument("--floor-margin", type=float, default=2.0)
    ap.add_argument("--hard-cap", type=float, default=96.0)
    ap.add_argument("--cp-scale", type=float, default=0.04)
    ap.add_argument("--reg-scale", type=float, default=0.2)
    ap.add_argument("--out", default="prophet_forecast_2025_2026_4day.csv")
    args = ap.parse_args()

    if not os.path.exists(args.train) or not os.path.exists(args.allvars):
        raise SystemExit(f"找不到输入文件：{args.train} / {args.allvars}")

    # Training table (restricted to 2017-2022 inside the loader).
    train, cols = load_train_table(args.train, args.target_col, args.cols)

    # Future regressors on the 2025-2026 4-day grid.
    fut_idx = build_future_grid()
    factors_future = build_future_regressors(args.allvars, cols, args.grad_col, fut_idx, cp_scale=args.cp_scale)

    # Adaptive cap/floor frames (columns: ds, cap, floor only).
    tr_cf, fut_cf = build_cap_floor_calendar(train, fut_idx,
                                             cap_pct=args.cap_pct, cap_margin=args.cap_margin,
                                             floor_pct=args.floor_pct, floor_margin=args.floor_margin,
                                             hard_cap=args.hard_cap)

    # === Key fix: avoid producing *_x/*_y merge suffixes ===
    # Training set: attach cap/floor to the original train frame
    # (train already holds y and the regressors).
    train_df = train.merge(tr_cf, on="ds", how="left")  # columns: ds, y, <regressors>, cap, floor

    # Future set: merge cap/floor with the future factors by ds
    # (column names stay aligned with the training set).
    fut_df = fut_cf.merge(factors_future.rename(columns={"date":"ds"}), on="ds", how="left")
    # ================================

    # Name the regressor columns explicitly instead of auto-detecting them
    # from train_df, which could pick up suffixed duplicates.
    reg_cols = cols

    yhat = prophet_with_regressors_logistic(
        train_df, fut_df, reg_cols=reg_cols,
        cp_scale=args.cp_scale, reg_scale=args.reg_scale
    )
    yhat.to_csv(args.out, index=False, encoding="utf-8-sig")
    print(f"[Prophet-Capped] saved: {args.out} rows={len(yhat)}")

if __name__ == "__main__":
    main()
