#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse, warnings, re, os
# Silence ALL warnings globally (Prophet/pandas emit many FutureWarnings).
# NOTE(review): this also hides genuinely actionable deprecation warnings —
# confirm the blanket suppression is intended.
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from pandas.tseries.offsets import MonthEnd

# Training window (inclusive) and the held-out evaluation year.
TRAIN_YEARS = list(range(2017, 2022))  # 2017–2021
EVAL_YEAR   = 2022  # forecasts and scoring target this year

# ---------- Basic utilities ----------
def ensure_percent_scale(s: pd.Series) -> pd.Series:
    """Coerce to numeric, rescale fraction-valued data (max <= 1) to percent,
    and clamp everything into [0, 100]."""
    values = pd.to_numeric(s, errors="coerce")
    looks_fractional = values.max(skipna=True) <= 1.0
    if looks_fractional:
        values = values.mul(100.0)
    return values.clip(0, 100)

def make_4day_grid_for_year(year: int) -> pd.DatetimeIndex:
    """Dates from Mar 1 through Jul 1 of `year` (inclusive bound), every 4 days."""
    start = pd.Timestamp(year=year, month=3, day=1)
    stop = pd.Timestamp(year=year, month=7, day=1)
    n_steps = (stop - start).days // 4
    return pd.DatetimeIndex([start + pd.Timedelta(days=4 * k) for k in range(n_steps + 1)])

def _try_parse_quarter_str(s: pd.Series) -> pd.Series:
    """Parse quarter labels (e.g. '2021Q1', '2020-3', '2021第2季度') into
    quarter-end Timestamps; unparseable values become NaT.

    Chinese/Roman numerals are normalized to digits first.
    """
    zh_map = {"一":"1","二":"2","三":"3","四":"4","Ⅰ":"1","Ⅱ":"2","Ⅲ":"3","Ⅳ":"4"}
    # FIX: the trailing suffix can be two characters ('季度'); the old pattern
    # `[季度Q]?` permitted at most one, so '2021第1季度' silently became NaT.
    # `{0,2}` keeps every previously accepted form and adds the two-char suffix.
    pat = re.compile(r"^\s*(\d{4})\s*[-/ ]?\s*[Q第]?\s*([1-4])\s*[季度Q]{0,2}\s*$")

    def _to_dt(x):
        if pd.isna(x):
            return pd.NaT
        xs = str(x).strip()
        for k, v in zh_map.items():
            xs = xs.replace(k, v)
        m = pat.match(xs)
        if m:
            y, q = int(m.group(1)), int(m.group(2))
            # Quarter q ends in month q*3; MonthEnd(0) snaps to that month's last day.
            return pd.Timestamp(year=y, month=q*3, day=1) + MonthEnd(0)
        return pd.NaT

    return s.map(_to_dt)

def _try_parse_yearmonth(s: pd.Series) -> pd.Series:
    """Parse 'YYYYMM' or 'YYYY-M'/'YYYY/M'/'YYYY.M' labels into month-end
    Timestamps; anything else becomes NaT."""
    compact = re.compile(r"^\d{6}$")
    separated = re.compile(r"^\d{4}[-/.]\d{1,2}$")

    def _parse_one(raw):
        if pd.isna(raw):
            return pd.NaT
        text = str(raw).strip()
        if compact.match(text):
            parsed = pd.to_datetime(text, format="%Y%m", errors="coerce")
        elif separated.match(text):
            parsed = pd.to_datetime(text, errors="coerce")
        else:
            return pd.NaT
        # Snap to the last day of the month (NaT stays NaT).
        return parsed + MonthEnd(0) if pd.notna(parsed) else pd.NaT

    return s.map(_parse_one)

def _coerce_datetime(df: pd.DataFrame, date_col: str) -> pd.DatetimeIndex:
    """Best-effort conversion of `df[date_col]` into a DatetimeIndex.

    Strategies are tried in order — generic parsing, quarter labels,
    year-month labels — and one wins as soon as it parses more than 80% of
    the values. If every strategy fails an all-NaT index is returned.
    """
    ser = df[date_col]
    # FIX: `infer_datetime_format=` was deprecated in pandas 2.0 (format
    # inference is now the default) and removed in later releases, so passing
    # it raises TypeError on modern pandas — call without it.
    candidates = (
        lambda: pd.to_datetime(ser, errors="coerce"),
        lambda: _try_parse_quarter_str(ser),
        lambda: _try_parse_yearmonth(ser),
    )
    for parse in candidates:
        dt = parse()
        if dt.notna().mean() > 0.8:
            return pd.DatetimeIndex(dt)
    return pd.DatetimeIndex([pd.NaT]*len(ser))

def guess_and_parse_datetime(df: pd.DataFrame) -> pd.DatetimeIndex:
    """Locate and parse the datetime information in `df`, returning a DatetimeIndex.

    Resolution order:
      1. a conventionally named date column (date/时间/日期/ds/time);
      2. a (year, quarter|season) column pair -> quarter-end dates;
      3. a (year, month) column pair -> month-end dates;
      4. the first column, parsed with `_coerce_datetime`.

    Raises:
        ValueError: if no strategy yields any parseable dates.
    """
    # 1) well-known date column names (first match with any parseable value wins)
    for cand in ["date","时间","日期","ds","time"]:
        if cand in df.columns:
            dt = _coerce_datetime(df, cand)
            if dt.notna().any(): return dt
    # Case-insensitive lookup table for the year/quarter/month column pairs.
    low = {c.lower(): c for c in df.columns}
    if "year" in low and ("quarter" in low or "season" in low):
        y = pd.to_numeric(df[low["year"]], errors="coerce")
        qn = low["quarter"] if "quarter" in low else low["season"]
        q = pd.to_numeric(df[qn], errors="coerce").clip(1,4)
        # Quarter q ends in month q*3; MonthEnd(0) snaps to that month's last day.
        dt = pd.to_datetime(dict(year=y, month=(q*3).astype("Int64"), day=1), errors="coerce") + MonthEnd(0)
        return pd.DatetimeIndex(dt)
    if "year" in low and "month" in low:
        y = pd.to_numeric(df[low["year"]], errors="coerce")
        m = pd.to_numeric(df[low["month"]], errors="coerce").clip(1,12)
        dt = pd.to_datetime(dict(year=y, month=m, day=1), errors="coerce") + MonthEnd(0)
        return pd.DatetimeIndex(dt)
    # 4) last resort: try the first column, whatever it is called
    first = df.columns[0]
    dt = _coerce_datetime(df, first)
    if dt.notna().any(): return dt
    raise ValueError("无法识别时间列；可用列名：" + ", ".join(map(str, df.columns)))

# ---------- Training data ----------
def load_train_table(path: str, target_col: str, cols=None):
    """Load the training CSV, keep only TRAIN_YEARS rows, and return
    (train_df, regressor_cols).

    The returned frame has columns ds (date) and y (target, percent scale)
    plus the regressor columns. `cols=None` selects every numeric column
    except the target.
    """
    df = pd.read_csv(path)
    date_col = None
    for name in ["date", "时间", "日期", "ds", "time"]:
        if name in df.columns:
            date_col = name
            break
    if date_col is None:
        raise ValueError("训练表未找到时间列（date/时间/日期/ds/time）")
    df["date"] = pd.to_datetime(df[date_col])
    years = df["date"].dt.year
    df = df[(years >= min(TRAIN_YEARS)) & (years <= max(TRAIN_YEARS))].copy()
    if target_col not in df.columns:
        raise ValueError(f"找不到目标列 {target_col}")
    df[target_col] = ensure_percent_scale(df[target_col])
    if cols is None:
        numeric = df.select_dtypes(include=[np.number]).columns.tolist()
        cols = [c for c in numeric if c != target_col]
    train = df[["date", target_col] + cols].copy()
    train = train.rename(columns={"date": "ds", target_col: "y"})
    return train, cols

# ---------- Exogenous-factor extrapolation (2022) ----------
def forecast_quarterly_const(series_quarter: pd.Series, fut_grid: pd.DatetimeIndex) -> pd.Series:
    """Forecast a quarterly series for EVAL_YEAR and broadcast it piecewise-
    constant onto `fut_grid` (each grid date takes its own quarter's value).

    Prophet is used when importable; otherwise a plain linear trend fitted on
    nanosecond epoch time is extrapolated.
    """
    # NOTE(review): the "Q" resample alias is deprecated in pandas>=2.2 in
    # favour of "QE"; kept here for compatibility with older pandas.
    s = series_quarter.copy().sort_index().resample("Q").mean().dropna()
    q_targets = [pd.Timestamp(f"{EVAL_YEAR}-03-31"), pd.Timestamp(f"{EVAL_YEAR}-06-30")]
    try:
        from prophet import Prophet
        dfp = pd.DataFrame({"ds": s.index, "y": s.values})
        m = Prophet(weekly_seasonality=False, daily_seasonality=False, yearly_seasonality=True)
        m.fit(dfp)
        futq = pd.DataFrame({"ds": pd.to_datetime(q_targets)})
        yhat = m.predict(futq)[["ds","yhat"]].set_index("ds")["yhat"]
    except Exception:
        # Fallback: least-squares line over epoch-nanosecond time.
        # FIX: `DatetimeIndex.view("i8")` is deprecated and removed in modern
        # pandas — use the supported `.astype("int64")` conversion instead.
        idx = np.asarray(s.index.astype("int64"), dtype=float)
        vals = s.values.astype(float)
        k, b = np.polyfit(idx, vals, 1)
        idx2 = np.asarray(pd.to_datetime(q_targets).astype("int64"), dtype=float)
        yhat = pd.Series(k*idx2 + b, index=pd.to_datetime(q_targets))

    def quarter_end(ts):
        # Last calendar day of the quarter containing `ts`.
        q = (ts.month-1)//3 + 1
        return pd.Timestamp(year=ts.year, month=q*3, day=1) + MonthEnd(0)

    qe = pd.Series([quarter_end(d) for d in fut_grid], index=fut_grid)
    # Align the quarterly forecasts onto every needed quarter-end, filling gaps.
    yhat_full = yhat.reindex(yhat.index.union(qe.unique())).sort_index().ffill().bfill()
    return yhat_full.reindex(qe.values).set_axis(fut_grid)

def forecast_continuous(series_daily: pd.Series, fut_grid: pd.DatetimeIndex, cp_scale=0.15) -> pd.Series:
    """Forecast a continuously varying series onto `fut_grid`.

    The input is daily-resampled and gap-filled, then restricted to the
    training years. Prophet (yearly seasonality only) is preferred; without
    it, a linear trend plus one annual sine/cosine harmonic is fitted by
    least squares and extrapolated.
    """
    filled = (
        pd.to_numeric(series_daily, errors="coerce")
        .asfreq("D")
        .interpolate("time")
        .ffill()
        .bfill()
    )
    in_window = (filled.index.year >= min(TRAIN_YEARS)) & (filled.index.year <= max(TRAIN_YEARS))
    s_train = filled[in_window]
    try:
        from prophet import Prophet
        model = Prophet(
            weekly_seasonality=False,
            daily_seasonality=False,
            yearly_seasonality=True,
            changepoint_prior_scale=cp_scale,
        )
        model.fit(pd.DataFrame({"ds": s_train.index, "y": s_train.values}))
        forecast = model.predict(pd.DataFrame({"ds": fut_grid}))
        return forecast[["ds", "yhat"]].set_index("ds")["yhat"]
    except Exception:
        # Fallback: linear trend + annual harmonic, solved via the pseudo-inverse.
        origin = s_train.index[0]
        t_fit = (s_train.index - origin).days.values.astype(float)
        omega = 2 * np.pi / 365.25
        design = np.c_[np.ones_like(t_fit), t_fit, np.sin(omega * t_fit), np.cos(omega * t_fit)]
        beta = np.linalg.pinv(design) @ s_train.values.astype(float)
        t_fut = (fut_grid - origin).days.values.astype(float)
        design_fut = np.c_[np.ones_like(t_fut), t_fut, np.sin(omega * t_fut), np.cos(omega * t_fut)]
        return pd.Series(design_fut @ beta, index=fut_grid, dtype=float)

def build_future_regressors_2022(allvars_csv, cols, grad_col, fut_grid, cp_scale=0.15):
    """Build the future-regressor frame for the evaluation grid.

    Each column in `cols` is extrapolated from `allvars_csv`:
      - `grad_col` (graduate headcount) is treated as quarter-constant and
        forecast with `forecast_quarterly_const` (no daily interpolation);
      - every other column is daily-interpolated and forecast with
        `forecast_continuous`.

    Returns:
        DataFrame with a "date" column plus one column per regressor,
        one row per date in `fut_grid`.

    Raises:
        ValueError: if the CSV has no numeric columns or a requested column
            is missing from them.
    """
    raw = pd.read_csv(allvars_csv)
    dt = guess_and_parse_datetime(raw)
    raw = raw.copy()
    raw["date"] = dt
    raw = raw.dropna(subset=["date"]).sort_values("date").set_index("date")
    num = raw.select_dtypes(include=[np.number]).copy()
    if num.empty: raise ValueError("all_vars_yoy.csv 中没有数值列。")
    # Daily resample + time interpolation so forecast_continuous sees a gap-free series.
    daily_all = num.resample("D").mean().interpolate("time").ffill().bfill()

    fac = pd.DataFrame(index=fut_grid)
    for col in cols:
        # A column is usable if it survived the numeric daily resample, or if
        # it is the quarter-constant grad_col present in the raw numeric frame.
        if col not in daily_all.columns and not (grad_col and col == grad_col and col in num.columns):
            raise ValueError(f"列 `{col}` 不在 all_vars_yoy.csv 的数值列中。")
        if grad_col and col == grad_col:
            fac[col] = forecast_quarterly_const(num[col], fut_grid)
        else:
            fac[col] = forecast_continuous(daily_all[col], fut_grid, cp_scale=cp_scale)
    return fac.reset_index().rename(columns={"index":"date"})

# ---------- Prophet (logistic growth + multiplicative seasonality) ----------
def fit_and_forecast_prophet(train_df: pd.DataFrame,
                             future_df: pd.DataFrame,
                             cap_value=80.0,
                             cp_scale=0.15,
                             reg_scale=0.4,
                             changepoint_range=0.7) -> pd.DataFrame:
    """Fit Prophet (logistic growth, multiplicative seasonality) and predict.

    Args:
        train_df: columns ds, y plus regressor columns (everything not ds/y
            is registered as an extra regressor).
        future_df: column ds plus the same regressor columns.
        cap_value: logistic-growth capacity written into the cap column.
        cp_scale: Prophet changepoint_prior_scale.
        reg_scale: prior scale shared by all extra regressors.
        changepoint_range: fraction of history where changepoints may occur.

    Returns:
        DataFrame with columns "date" and "就业率（%）" (yhat clipped to [0, 100]).
    """
    from prophet import Prophet
    # Every non-ds/y column of the training frame is an extra regressor.
    reg_cols = [c for c in train_df.columns if c not in {"ds","y"}]
    train = train_df.copy()
    fut   = future_df.copy()
    # Logistic growth requires cap/floor columns on both frames.
    train["cap"] = cap_value; train["floor"] = 0.0
    fut["cap"]   = cap_value; fut["floor"]   = 0.0

    m = Prophet(
        growth="logistic",
        yearly_seasonality=True,
        weekly_seasonality=False,
        daily_seasonality=False,
        seasonality_mode="multiplicative",
        changepoint_prior_scale=cp_scale,
        changepoint_range=changepoint_range,
        interval_width=0.8
    )
    for c in reg_cols:
        m.add_regressor(c, standardize=True, prior_scale=reg_scale)

    # Prophet cannot train on NaNs in y or the regressors — drop and report.
    before = len(train)
    train = train.dropna(subset=["y","cap","floor"] + reg_cols)
    if before - len(train) > 0:
        print(f"[INFO] drop {before - len(train)} NaN rows for training")

    m.fit(train[["ds","y","cap","floor"] + reg_cols])

    # Future regressors must also be NaN-free; interpolate then pad both ends.
    for c in reg_cols:
        if fut[c].isna().any():
            fut[c] = fut[c].interpolate().ffill().bfill()

    pred = m.predict(fut[["ds","cap","floor"] + reg_cols])[["ds","yhat"]]
    # Keep predictions in the valid percentage range.
    pred["yhat"] = pred["yhat"].clip(0, 100)
    return pred.rename(columns={"ds":"date","yhat":"就业率（%）"})

# ---------- SCCS constrained smoothing (key step!) ----------
def sccs_constrain(y_pred: np.ndarray,
                   cap: float = 100.0,
                   max_delta: float = 2.0,
                   min_delta_firstN: float = 0.0,
                   min_delta_N: int = 0) -> np.ndarray:
    """
    Stay as close as possible to `y_pred` while forcing every step change into
    [min_delta, max_delta] and keeping all values within [0, cap].

    Args:
        y_pred: raw predictions (length n); never mutated.
        cap: hard upper bound for every value.
        max_delta: largest allowed rise per step (percentage points per 4 days).
        min_delta_firstN: minimum rise applied to the first `min_delta_N` steps
            (lifts the early segment; pass 0 to disable).
        min_delta_N: number of leading steps the minimum rise applies to
            (e.g. 6 == the first 24 days).

    Returns:
        Constrained copy of `y_pred` as a float ndarray.
    """
    y_pred = np.asarray(y_pred, dtype=float)
    n = len(y_pred)
    # FIX: guard the empty case — the original indexed y_adj[0] unconditionally
    # and raised IndexError for zero-length input.
    if n == 0:
        return np.zeros(0, dtype=float)
    y_adj = np.zeros_like(y_pred)

    # Start point: the raw prediction clamped into [0, cap].
    y_adj[0] = np.clip(y_pred[0], 0.0, cap)

    for t in range(1, n):
        # Lower bound for this step; after the first N steps drops are allowed.
        min_d = min_delta_firstN if t <= min_delta_N else -np.inf
        # Step the raw forecast wants to take from the adjusted level.
        d_raw = y_pred[t] - y_adj[t-1]
        # Clamp into [min_d, max_delta].
        d_new = np.clip(d_raw, min_d, max_delta)
        y_adj[t] = y_adj[t-1] + d_new
        # Ceiling protection.
        if y_adj[t] > cap:
            y_adj[t] = cap

    # Backward pass: gently pull earlier points up so no forward step exceeds
    # max_delta (prevents a late spike from implying an impossible jump).
    for t in range(n-2, -1, -1):
        if (y_adj[t+1] - y_adj[t]) > max_delta:
            y_adj[t] = y_adj[t+1] - max_delta
        if y_adj[t] > cap:
            y_adj[t] = cap
        if y_adj[t] < 0:
            y_adj[t] = 0

    return y_adj

# ---------- Test-set loading & interpolation ----------
def load_and_smooth_test(test_path, grid_4d: pd.DatetimeIndex,
                         date_col=None, y_col=None) -> pd.DataFrame:
    """Read the ground-truth file (.xlsx/.xls or CSV), auto-detect the date
    and target columns, daily-interpolate, and sample onto `grid_4d`.

    Returns a DataFrame with columns ["date", "y_true"] aligned to grid_4d.
    """
    suffix = os.path.splitext(test_path)[1].lower()
    reader = pd.read_excel if suffix in (".xlsx", ".xls") else pd.read_csv
    df = reader(test_path)

    if date_col is None:
        for c in ["date","时间","日期","ds","time","Time","Date"]:
            if c in df.columns:
                date_col = c
                break
        else:
            date_col = df.columns[0]
    if y_col is None:
        for c in ["employment_rate","整体就业率","就业率","y","就业率（%）"]:
            if c in df.columns:
                y_col = c
                break
        else:
            y_col = df.columns[1]

    df = df.rename(columns={date_col: "date", y_col: "y_true"})
    df["date"] = pd.to_datetime(df["date"])
    df = df.sort_values("date")
    df["y_true"] = ensure_percent_scale(df["y_true"])

    # Daily interpolation first, then a second pass on the 4-day grid in case
    # grid points fall outside the observed span.
    daily = df.set_index("date")["y_true"].asfreq("D").interpolate("time").ffill().bfill()
    on_grid = daily.reindex(grid_4d).interpolate("time").ffill().bfill()
    return pd.DataFrame({"date": grid_4d, "y_true": on_grid.values})

# ---------- Metrics ----------
def metrics_table(y_true: np.ndarray, y_pred: np.ndarray) -> pd.DataFrame:
    """One-row DataFrame of regression metrics: MSE, RMSE, MAE, MAPE(%),
    sMAPE(%), R2.

    Percentage-error denominators are floored at 1e-6 so zero targets cannot
    divide by zero; R2 adds a 1e-12 ridge to the total sum of squares.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    err = y_true - y_pred
    abs_err = np.abs(err)
    mse = np.mean(err ** 2)
    mape = 100.0 * np.mean(abs_err / np.maximum(1e-6, np.abs(y_true)))
    smape = 100.0 * np.mean(2.0 * abs_err / np.maximum(1e-6, np.abs(y_true) + np.abs(y_pred)))
    r2 = 1.0 - np.sum(err ** 2) / (np.sum((y_true - np.mean(y_true)) ** 2) + 1e-12)
    row = {
        "MSE": mse,
        "RMSE": np.sqrt(mse),
        "MAE": np.mean(abs_err),
        "MAPE(%)": mape,
        "sMAPE(%)": smape,
        "R2": r2,
    }
    return pd.DataFrame([row])

# ---------- Main program ----------
def main():
    """CLI entry point: train on 2017–2021, forecast 2022 (Mar 1 – Jul 1 on a
    4-day grid) with Prophet + SCCS constrained smoothing, then align with the
    test file and report metrics."""
    ap = argparse.ArgumentParser(description="评估：2017–2021 训练，Prophet 预测 2022(3/1~7/1/每4天) + 约束平滑限速，并与 test 对齐打分")
    ap.add_argument("--train", required=True, help="训练表 CSV（pre_2016_2021_grid.csv）")
    ap.add_argument("--allvars", required=True, help="外生变量 CSV（all_vars_yoy.csv）")
    ap.add_argument("--test", required=True, help="测试集（.xlsx or .csv），列如：时间,整体就业率")
    ap.add_argument("--target-col", default="employment_rate")
    ap.add_argument("--grad-col", required=True, help="毕业生人数列名（季度常数，不插值）")
    ap.add_argument("--cols", type=str, required=False, default=None,
                    help="因子列名，用空格分隔（至少4个）。可包含中文，例：\"CIER GDP 第一产业占比 第二产业占比 第三产业占比 毕业生人数_万人\"")
    ap.add_argument("--test-date-col", default=None)
    ap.add_argument("--test-y-col", default=None)
    # Prophet hyper-parameters
    ap.add_argument("--cap", type=float, default=80.0)
    ap.add_argument("--cp-scale", type=float, default=0.15)
    ap.add_argument("--reg-scale", type=float, default=0.4)
    ap.add_argument("--changepoint-range", type=float, default=0.7)
    # Constrained-smoothing parameters
    ap.add_argument("--max-delta", type=float, default=2.0, help="每4天最大允许上升的百分点")
    ap.add_argument("--min-delta-firstN", type=float, default=0.0, help="前N步的最小增幅（百分点），用来微抬3月初")
    ap.add_argument("--min-delta-N", type=int, default=0, help="应用最小增幅的步数")
    # Output files
    ap.add_argument("--out-pred", default="pred_2022_prophet_constrained.csv")
    ap.add_argument("--out-comp", default="eval_2022_comparison_constrained.csv")
    ap.add_argument("--out-metrics", default="eval_2022_metrics_constrained.csv")
    args = ap.parse_args()

    # FIX: validate all input paths *before* any file is read. Previously the
    # train CSV was loaded first, so a missing --train surfaced as a raw pandas
    # error and missing --allvars/--test were reported only after training data
    # had already been loaded.
    for p in [args.train, args.allvars, args.test]:
        if not os.path.exists(p): raise SystemExit(f"找不到输入文件：{p}")

    if args.cols is None:
        train, reg_cols = load_train_table(args.train, args.target_col, None)
    else:
        reg_cols = args.cols.strip().split()
        train, _ = load_train_table(args.train, args.target_col, reg_cols)

    # 2022 evaluation grid + extrapolated exogenous factors
    fut_idx = make_4day_grid_for_year(EVAL_YEAR)
    future_reg = build_future_regressors_2022(args.allvars, reg_cols, args.grad_col, fut_idx, cp_scale=args.cp_scale)
    fut_df = future_reg.rename(columns={"date":"ds"})

    # Prophet forecast
    yhat = fit_and_forecast_prophet(train_df=train, future_df=fut_df,
                                    cap_value=args.cap,
                                    cp_scale=args.cp_scale,
                                    reg_scale=args.reg_scale,
                                    changepoint_range=args.changepoint_range)

    # === Key step: SCCS constrained smoothing (rate limiting + early lift) ===
    y0 = yhat["就业率（%）"].values
    y1 = sccs_constrain(
        y_pred=y0,
        cap=args.cap,
        max_delta=args.max_delta,
        min_delta_firstN=args.min_delta_firstN,
        min_delta_N=args.min_delta_N
    )
    yhat["就业率（%）"] = y1

    # Persist the forecast
    yhat.to_csv(args.out_pred, index=False, encoding="utf-8-sig")
    print(f"[PRED] saved {args.out_pred} rows={len(yhat)}")

    # Interpolate the test set onto the same grid
    test_df = load_and_smooth_test(args.test, fut_idx, args.test_date_col, args.test_y_col)

    # Align, compare, and score
    comp = pd.merge(yhat, test_df, on="date", how="inner")
    comp["误差(预测-真实)"] = comp["就业率（%）"] - comp["y_true"]
    comp.to_csv(args.out_comp, index=False, encoding="utf-8-sig")
    print(f"[COMP] saved {args.out_comp} rows={len(comp)}")

    mets = metrics_table(comp["y_true"].values, comp["就业率（%）"].values)
    mets.to_csv(args.out_metrics, index=False, encoding="utf-8-sig")
    print("[METRICS]\n", mets.to_string(index=False))

if __name__ == "__main__":
    main()
