#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse, warnings, os, numpy as np, pandas as pd
warnings.filterwarnings("ignore")
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from statsmodels.tsa.api import VAR

# Training window (inclusive) and the held-out evaluation year.
TRAIN_YEARS = list(range(2017, 2022))  # 2017-2021
EVAL_YEAR   = 2022

# ========== Common utilities ==========
def ensure_percent_scale(s: pd.Series) -> pd.Series:
    """Coerce to numeric and normalize onto a 0-100 percent scale.

    Values whose maximum is <= 1.0 are assumed to be fractions and are
    multiplied by 100; the result is clipped into [0, 100].
    """
    values = pd.to_numeric(s, errors="coerce")
    looks_fractional = values.max(skipna=True) <= 1.0
    if looks_fractional:
        values = values * 100.0
    return values.clip(0, 100)

def make_4day_grid_for_year(year: int) -> pd.DatetimeIndex:
    """Dates spaced 4 days apart from March 1 up to (at most) July 1 of ``year``."""
    return pd.date_range(start=pd.Timestamp(year=year, month=3, day=1),
                         end=pd.Timestamp(year=year, month=7, day=1),
                         freq="4D")

def load_and_smooth_test(test_path, grid_4d: pd.DatetimeIndex,
                         date_col=None, y_col=None) -> pd.DataFrame:
    """Load the evaluation series and resample it onto the 4-day grid.

    Column names are auto-detected when not given (falling back to the
    first/second columns). The series is densified to daily frequency with
    time interpolation, then sampled at ``grid_4d``.

    Returns a DataFrame with columns ``date`` and ``y_true`` (percent scale).
    """
    suffix = os.path.splitext(test_path)[1].lower()
    if suffix in (".xlsx", ".xls"):
        frame = pd.read_excel(test_path)
    else:
        frame = pd.read_csv(test_path)

    # Auto-detect the timestamp column; fall back to the first column.
    if date_col is None:
        date_candidates = ["date","时间","日期","ds","time","Time","Date"]
        date_col = next((c for c in date_candidates if c in frame.columns),
                        frame.columns[0])
    # Auto-detect the target column; fall back to the second column.
    if y_col is None:
        y_candidates = ["employment_rate","整体就业率","就业率","y","就业率（%）"]
        y_col = next((c for c in y_candidates if c in frame.columns),
                     frame.columns[1])

    frame = frame.rename(columns={date_col: "date", y_col: "y_true"})
    frame["date"] = pd.to_datetime(frame["date"])
    frame = frame.sort_values("date")
    frame["y_true"] = ensure_percent_scale(frame["y_true"])

    # Daily densification, then sample the 4-day grid points.
    daily = (frame.set_index("date")["y_true"]
                  .asfreq("D").interpolate("time").ffill().bfill())
    on_grid = daily.reindex(grid_4d).interpolate("time").ffill().bfill()
    return pd.DataFrame({"date": grid_4d, "y_true": on_grid.values})

# ========== Daily-resample the training table (avoiding column-name clashes) ==========
def load_train_grid(path: str, target_col: str, cols=None, grad_col=None):
    """Read the training CSV, daily-interpolate the used columns over the
    TRAIN_YEARS span, and sample the per-year 4-day grids.

    grad_col, if given, is filled step-wise (ffill/bfill) instead of being
    time-interpolated. Returns ``(train_df, used_cols)`` with the target
    column listed first in ``used_cols``.

    Raises ValueError when no timestamp column or the target is missing.
    """
    raw = pd.read_csv(path)
    date_col = next((c for c in ["date","时间","日期","ds","time"] if c in raw.columns), None)
    if date_col is None:
        raise ValueError("训练表未找到时间列")
    raw["date"] = pd.to_datetime(raw[date_col])
    lo, hi = min(TRAIN_YEARS), max(TRAIN_YEARS)
    years = raw["date"].dt.year
    raw = raw[(years >= lo) & (years <= hi)].copy()
    if target_col not in raw.columns:
        raise ValueError(f"找不到目标列 {target_col}")
    raw[target_col] = ensure_percent_scale(raw[target_col])

    # Default factor set: every numeric column except the target.
    if cols is None:
        numeric = raw.select_dtypes(include=[np.number]).columns.tolist()
        cols = [c for c in numeric if c != target_col]
    used_cols = [target_col] + [c for c in cols if c != target_col]

    base_idx = pd.date_range(f"{lo}-03-01", f"{hi}-07-01", freq="D")
    indexed = raw.set_index("date").sort_index()
    daily = pd.DataFrame(index=base_idx)
    for col in used_cols:
        series = pd.to_numeric(indexed[col], errors="coerce")
        if grad_col and col == grad_col:
            # Graduation counts: hold the last observation (no interpolation).
            series = series.ffill().bfill()
            daily[col] = series.reindex(base_idx).ffill().bfill()
        else:
            daily[col] = series.reindex(base_idx).interpolate("time").ffill().bfill()

    # Keep only the 4-day grid points of each training year.
    grid = pd.DatetimeIndex(sorted(np.concatenate([make_4day_grid_for_year(y) for y in TRAIN_YEARS])))
    train_df = daily.reindex(grid).reset_index().rename(columns={"index": "date"})
    return train_df[["date"] + used_cols].copy(), used_cols

# ========== Logit transform + seasonal template ==========
def pct_to_logit(pct: pd.Series, eps_pct: float = 0.1) -> pd.Series:
    """Percent -> (0,1) -> logit, clipping ``eps_pct`` away from both ends
    so the log-odds stay finite."""
    bounded = pct.clip(eps_pct, 100 - eps_pct)
    p = bounded / 100.0
    return np.log(p / (1.0 - p))

def logit_to_pct(lgt: pd.Series) -> pd.Series:
    """Inverse logit back onto the 0-100 percent scale."""
    probability = 1.0 / (1.0 + np.exp(-lgt))
    percent = probability * 100.0
    return percent.clip(0, 100)

def pos_index(dates: pd.Series) -> pd.Series:
    """Ordinal slot (0..N-1) of each date on the Mar-1-to-Jul-1 4-day grid.

    Keys on month-day, so every year maps to the same slot numbers
    (months after February are unaffected by leap years).
    """
    reference = pd.date_range("2020-03-01", "2020-07-01", freq="4D")
    slot_of = {ts.strftime("%m-%d"): slot for slot, ts in enumerate(reference)}
    return dates.dt.strftime("%m-%d").map(slot_of).astype(int)

def build_season_template_logit(train_df: pd.DataFrame, target_col: str) -> pd.Series:
    """Seasonal template in logit space: the mean of logit(rate) over the
    training years at each 4-day grid position.

    Fix: the old code filled a ``pd.Series`` created without a dtype
    (object dtype — a deprecated constructor default) element by element in
    an O(N*K) loop. ``groupby(...).mean()`` produces the same float Series
    (groupby sorts its keys, matching the old ``sorted(k.unique())`` order).
    """
    y_logit = pct_to_logit(train_df[target_col])
    positions = pos_index(train_df["date"])
    # rename_axis(None) drops the grouper's name so the index matches the
    # unnamed index the previous implementation produced.
    return y_logit.groupby(positions).mean().astype(float).rename_axis(None)

def template_for_year(template_logit: pd.Series) -> pd.Series:
    """Project the seasonal template onto the EVAL_YEAR grid positions
    (one template value per grid slot, sorted by slot)."""
    eval_positions = pos_index(pd.Series(make_4day_grid_for_year(EVAL_YEAR)))
    unique_slots = eval_positions.unique()
    return template_logit.reindex(unique_slots).sort_index()

# ========== SN-VAR: anomaly term + standardization + differencing ==========
def standardize(df: pd.DataFrame) -> tuple[pd.DataFrame, pd.Series, pd.Series]:
    """Column-wise z-score. Zero-variance columns get std replaced by 1
    to avoid division by zero. Returns (z, mean, std)."""
    center = df.mean()
    scale = df.std().replace(0, 1.0)
    zscores = (df - center) / scale
    return zscores, center, scale

def inverse_standardize(z: pd.DataFrame, mu: pd.Series, sd: pd.Series) -> pd.DataFrame:
    """Undo ``standardize``: map z-scores back to the original scale."""
    return mu + sd * z

def make_diff(df: pd.DataFrame) -> pd.DataFrame:
    """First difference of every column, dropping the leading NaN row."""
    return df.diff(periods=1).dropna()

def invert_diff(fdiff: np.ndarray, last_level: np.ndarray) -> np.ndarray:
    """Cumulate differenced forecasts back to levels.

    Row i of the result is ``last_level`` plus the running sum of the
    first i+1 forecast differences (same addition order as the original
    sequential loop, so results are bit-identical).
    """
    level = last_level.astype(float).copy()
    rows = []
    for step in fdiff:
        level = level + step  # creates a fresh array; safe to append directly
        rows.append(level)
    return np.vstack(rows)

def var_forecast_sn(train_dates: pd.Series,
                    y_logit: pd.Series,
                    x_df: pd.DataFrame,
                    steps: int,
                    maxlags:int=8):
    """
    Seasonal-normalized VAR forecast of the target in logit space.

    train_dates : grid dates of the training rows (aligned with y_logit/x_df)
    y_logit     : logit(employment rate)
    x_df        : other factors (date-aligned)
    steps       : number of grid steps to forecast (the EVAL_YEAR grid length)
    maxlags     : upper bound for VAR lag-order selection

    Pipeline: build the y anomaly = y_logit - seasonal template; append X;
    standardize; first-difference; fit VAR; forecast; invert every transform.
    Returns the predicted y for the EVAL_YEAR grid, still in logit space.
    """
    # 1) Seasonal template (logit domain): per-grid-position mean across years
    k = pos_index(train_dates)
    template = pd.Series(index=sorted(k.unique()), dtype=float)
    for i in template.index:
        template.loc[i] = y_logit[k==i].mean()

    # 2) Anomaly term: deviation of each observation from its template value
    y_anom = y_logit.values - template[k].values
    data = pd.DataFrame({"y_anom": y_anom}, index=train_dates.values)
    X = x_df.copy().astype(float)
    data = pd.concat([data, X.set_index(train_dates.values)], axis=1)

    # 3) Standardize all channels (z-scores)
    Z, mu, sd = standardize(data)

    # 4) First difference — the VAR is fitted on changes, not levels
    Zdiff = make_diff(Z)

    # 5) Fit VAR; choose the lag order by AIC, falling back to 3 on failure
    model = VAR(Zdiff.values)
    try:
        sel = model.select_order(maxlags=maxlags)
        # NOTE(review): a selected order of 0 is falsy and silently becomes 3
        lags = sel.aic if hasattr(sel, "aic") and sel.aic else 3
        if lags is None or lags <= 0: lags = min(3, maxlags)
    except Exception:
        lags = min(3, maxlags)
    res = model.fit(lags)

    # 6) Forecast in the differenced domain
    fc_diff = res.forecast(Zdiff.values[-lags:], steps=steps)

    # 7) Undifference -> levels in the standardized domain
    last_level = Z.values[-1, :]
    Zlevel = invert_diff(fc_diff, last_level)

    # 8) De-standardize back to the anomaly/factor scale
    cols = ["y_anom"] + list(X.columns)
    Zdf = pd.DataFrame(Zlevel, columns=cols)
    level_df = inverse_standardize(Zdf, mu, sd)

    # 9) Rebuild y_logit: EVAL_YEAR template + predicted anomaly
    temp22 = template_for_year(template).values  # aligned by grid position
    y_logit_pred = temp22 + level_df["y_anom"].values
    return y_logit_pred

# ========== Correction components (optional) ==========
def residual_calendar_from_train(train_df: pd.DataFrame, target_col: str,
                                 smooth_window:int=5, clip_abs:float=10.0) -> pd.Series:
    """Per-grid-position mean residual of the target (percent domain)
    around a centered 3-point rolling baseline, indexed by "MM-DD",
    smoothed with a centered rolling mean and clipped to +/-clip_abs."""
    baseline = train_df[target_col].rolling(3, min_periods=1, center=True).mean()
    residual = train_df[target_col] - baseline
    day_key = train_df["date"].dt.strftime("%m-%d")
    per_day = residual.groupby(day_key).mean()

    # Reorder onto the canonical 4-day grid of month-day labels.
    order = pd.date_range("2020-03-01","2020-07-01",freq="4D").strftime("%m-%d")
    calendar = per_day.reindex(order).interpolate().bfill().ffill()
    calendar = calendar.rolling(smooth_window, min_periods=1, center=True).mean()
    return calendar.clip(-clip_abs, clip_abs)

def gain_schedule(dates, gain_mar=1.3, gain_apr=1.15, gain_may=1.0, gain_jun=0.85):
    """Month-dependent multiplicative gains (March..June/July), lightly
    smoothed with a centered 3-point rolling mean.

    Accepts either a Series of dates or any date-like sequence.
    """
    parsed = pd.to_datetime(dates)
    if isinstance(parsed, pd.Series):
        months, idx = parsed.dt.month, parsed.index
    else:
        months, idx = pd.Series(parsed).dt.month, pd.RangeIndex(len(parsed))
    gains = pd.Series(1.0, index=idx)
    gains.loc[months == 3] = gain_mar
    gains.loc[months == 4] = gain_apr
    gains.loc[months == 5] = gain_may
    gains.loc[(months == 6) | (months == 7)] = gain_jun
    return gains.rolling(3, min_periods=1, center=True).mean()

def slope_limit(series: pd.Series, max_step: float = 3.0) -> pd.Series:
    """Cap upward jumps between consecutive points at ``max_step``.

    NOTE(review): only positive steps are limited; downward moves pass
    through unchanged — confirm this asymmetry is intentional.
    """
    values = series.to_numpy(dtype=float).copy()
    for pos in range(1, len(values)):
        ceiling = values[pos - 1] + max_step
        if values[pos] > ceiling:
            values[pos] = ceiling
    return pd.Series(values, index=series.index)

# ========== Metrics ==========
def metrics_table(y_true: np.ndarray, y_pred: np.ndarray, prefix:str="") -> pd.DataFrame:
    """One-row DataFrame with MSE/RMSE/MAE/MAPE/sMAPE/R2, column names
    prefixed with ``prefix`` (e.g. "BASE_" or "CORR_")."""
    mse = mean_squared_error(y_true, y_pred)
    # 1e-6 floors keep the percentage errors finite near zero.
    mape = float(np.mean(np.abs((y_true - y_pred)/np.maximum(1e-6, np.abs(y_true))))*100.0)
    smape = float(100.0*np.mean(2.0*np.abs(y_pred-y_true)/np.maximum(1e-6,(np.abs(y_true)+np.abs(y_pred)))))
    row = {
        f"{prefix}MSE": mse,
        f"{prefix}RMSE": np.sqrt(mse),
        f"{prefix}MAE": mean_absolute_error(y_true, y_pred),
        f"{prefix}MAPE(%)": mape,
        f"{prefix}sMAPE(%)": smape,
        f"{prefix}R2": r2_score(y_true, y_pred),
    }
    return pd.DataFrame([row])

# ========== Main program ==========
def main():
    """CLI entry point: train SN-VAR on 2017-2021, forecast the 2022
    4-day grid, optionally apply calendar/gain/slope corrections (percent
    domain), then evaluate against the test file and write CSV outputs."""
    ap = argparse.ArgumentParser(description="SN‑VAR：logit + 季节模板 + 异常项 VAR（2017–2021 训 → 评 2022）")
    ap.add_argument("--train", required=True)
    ap.add_argument("--test", required=True)
    ap.add_argument("--target-col", default="employment_rate")
    ap.add_argument("--grad-col", required=True)
    ap.add_argument("--cols", nargs="*", default=None)
    ap.add_argument("--maxlags", type=int, default=8)
    ap.add_argument("--use-calendar-corr", type=int, default=1)
    ap.add_argument("--gain-mar", type=float, default=1.3)
    ap.add_argument("--gain-apr", type=float, default=1.15)
    ap.add_argument("--gain-may", type=float, default=1.0)
    ap.add_argument("--gain-jun", type=float, default=0.85)
    ap.add_argument("--max-step", type=float, default=3.0)
    ap.add_argument("--out-base", default="pred_2022_var_base.csv")
    ap.add_argument("--out-corr", default="pred_2022_var_corrected.csv")
    ap.add_argument("--out-comp", default="eval_2022_comparison.csv")
    ap.add_argument("--out-metrics", default="eval_2022_metrics.csv")
    args = ap.parse_args()

    # Fail fast if either input file is missing.
    for p in [args.train, args.test]:
        if not os.path.exists(p): raise SystemExit(f"找不到文件：{p}")

    # 1) Load the training grid (target first in used_cols)
    train_df, used_cols = load_train_grid(args.train, args.target_col, args.cols, args.grad_col)
    if used_cols[0] != args.target_col:
        used_cols = [args.target_col] + [c for c in used_cols if c != args.target_col]

    # 2) Split target (logit-transformed) and factor columns
    y_pct = train_df[args.target_col]
    y_logit = pct_to_logit(y_pct)
    X = train_df[[c for c in used_cols if c != args.target_col]].copy()

    # 3) SN-VAR forecast of logit(employment rate) on the 2022 grid
    fut_idx = make_4day_grid_for_year(EVAL_YEAR)
    steps = len(fut_idx)
    y_logit_pred = var_forecast_sn(train_df["date"], y_logit, X, steps=steps, maxlags=args.maxlags)
    base = pd.DataFrame({"date": fut_idx, "base_pred": logit_to_pct(pd.Series(y_logit_pred))})
    base["base_pred"] = base["base_pred"].clip(0, 100)
    base.to_csv(args.out_base, index=False, encoding="utf-8-sig")
    print(f"[BASE] saved {args.out_base} rows={len(base)}")

    # 4) Optional: residual calendar + monthly gain + slope limiting (percent domain)
    if args.use_calendar_corr:
        res_cal = residual_calendar_from_train(train_df, target_col=args.target_col,
                                               smooth_window=5, clip_abs=10.0)
        gains = gain_schedule(base["date"],
                              gain_mar=args.gain_mar, gain_apr=args.gain_apr,
                              gain_may=args.gain_may, gain_jun=args.gain_jun)
        mmdd = base["date"].dt.strftime("%m-%d")
        adj = mmdd.map(res_cal).astype(float).values * gains.values
        corr = base.copy()
        corr["calendar_adjust"] = adj
        corr["corr_pred_pre"] = (corr["base_pred"] + corr["calendar_adjust"]).clip(0, 100)
        corr["corr_pred"] = slope_limit(corr["corr_pred_pre"], max_step=args.max_step).clip(0, 100)
        corr.to_csv(args.out_corr, index=False, encoding="utf-8-sig")
        print(f"[CORR] saved {args.out_corr} rows={len(corr)}")
    else:
        corr = base.copy()
        corr["calendar_adjust"] = 0.0
        corr["corr_pred"] = corr["base_pred"]
        corr.to_csv(args.out_corr, index=False, encoding="utf-8-sig")

    # 5) Align with the test set and evaluate both predictions
    test_df = load_and_smooth_test(args.test, fut_idx)
    comp = base.merge(corr[["date","corr_pred","calendar_adjust"]], on="date", how="left") \
               .merge(test_df, on="date", how="left")
    comp["误差_base(预测-真实)"] = comp["base_pred"] - comp["y_true"]
    comp["误差_corr(预测-真实)"] = comp["corr_pred"] - comp["y_true"]
    comp.to_csv(args.out_comp, index=False, encoding="utf-8-sig")
    print(f"[COMP] saved {args.out_comp} rows={len(comp)}")

    mets_base = metrics_table(comp["y_true"].values, comp["base_pred"].values, "BASE_")
    mets_corr = metrics_table(comp["y_true"].values, comp["corr_pred"].values, "CORR_")
    mets = pd.concat([mets_base, mets_corr], axis=1)
    mets.to_csv(args.out_metrics, index=False, encoding="utf-8-sig")
    print("[METRICS]\n", mets.to_string(index=False))

# Script entry point (argparse handles the CLI inside main()).
if __name__ == "__main__":
    main()
