# -*- coding: utf-8 -*-
"""
Training: use the 3/1–7/1 window of historical years other than 2022
(e.g. 2016–2021).
Pipeline: per-year regression interpolation (every 4 days) -> fit a univariate
model -> forecast 2022 on an evenly spaced grid (every 4 days)
    -> smooth the evenly spaced 2022 forecast -> take values at the test.xlsx
      dates via PCHIP interpolation
Evaluation: MSE/RMSE/MAE/MAPE/sMAPE/R2 + export of per-point details
"""

# ================= Parameters (edit only this section) =================
INPUT_XLSX = "P1sortdata.xlsx"   # full raw data set
SHEET_NAME = 0

TEST_XLSX = "test.xlsx"          # 2022 test set (two columns: date, value [0–1 or percent])
TEST_YEAR = 2022

# Modeling window and resolution
WINDOW_START = (3, 1)            # model only 3/1–7/1
WINDOW_END   = (7, 1)
RESOLUTION_DAYS = 4              # uniform spacing between grid points

# Per-year regression interpolation method: "poly" or "loess"
REGRESSION_KIND = "poly"
POLY_DEGREE_CANDIDATES = (1, 2, 3)
LOESS_FRAC = 0.6

# Univariate model: prefer SARIMAX(0,1,1)+Fourier(+linear trend), automatically
# compared against a UCM; the one with the lower AIC is used
FOURIER_K = 3
USE_TIME_TREND = True

# Smoothing applied to the evenly spaced 2022 forecast series
SMOOTH = True
SMOOTH_WINDOW = 9      # Savitzky-Golay window (odd and <= series length)
SMOOTH_POLY = 2        # polynomial order of the local fit
SMOOTH_CLIP_01 = True  # clip to [0, 100] again after smoothing
# ==========================================================

import os, warnings, numpy as np, pandas as pd
warnings.filterwarnings("ignore")  # silence convergence/deprecation noise from the modeling libs
os.makedirs("./output_p1", exist_ok=True)  # all CSV artifacts are written here
from scipy.interpolate import interp1d, PchipInterpolator
from scipy.signal import savgol_filter
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace.structural import UnobservedComponents

# LOESS is optional: when statsmodels' lowess smoother is unavailable the
# per-year interpolation falls back to polynomial regression.
try:
    from statsmodels.nonparametric.smoothers_lowess import lowess
except Exception:
    lowess = None

# ------------------ Utility functions ------------------
def _infer_cols(df: pd.DataFrame):
    date_candidates  = ["日期","date","时间","Date","time","Time"]
    value_candidates = ["就业率","就业率(%)","就业率（%）","y","value","rate"]
    dcol = None
    for c in df.columns:
        try:
            pd.to_datetime(df[c], errors="raise"); dcol = c; break
        except Exception:
            continue
    if dcol is None:
        for c in date_candidates:
            if c in df.columns: dcol = c; break
    vcol = None
    for c in df.columns:
        if c == dcol: continue
        s = pd.to_numeric(df[c], errors="coerce")
        if s.notna().sum() >= max(5, int(0.3*len(s))):
            vcol = c
            if any(k in str(c) for k in ["就","rate","value","%"]): break
    if vcol is None:
        for c in value_candidates:
            if c in df.columns: vcol = c; break
    if dcol is None or vcol is None:
        raise ValueError(f"无法识别日期/数值列：{df.columns.tolist()}")
    return dcol, vcol

def load_and_clean_all(path, sheet=0):
    """Read the full raw workbook and return a tidy frame.

    Returns a DataFrame with columns ``date`` (datetime), ``y`` (rate in
    percent, clipped to [0, 100]) and ``year``, sorted and de-duplicated.
    """
    raw = pd.read_excel(path, sheet_name=sheet)
    date_col, value_col = _infer_cols(raw)
    clean = raw[[date_col, value_col]].copy()
    clean.columns = ["date", "y"]
    clean["date"] = pd.to_datetime(clean["date"], errors="coerce")
    clean["y"] = pd.to_numeric(clean["y"], errors="coerce")
    clean = (
        clean.dropna()
        .drop_duplicates("date")
        .sort_values("date")
        .reset_index(drop=True)
    )
    # Values supplied as fractions (0–1 scale) are rescaled to percent.
    if clean["y"].median() <= 1.5 and clean["y"].max() <= 1.2:
        clean["y"] *= 100.0
    clean["y"] = clean["y"].clip(0, 100)
    clean["year"] = clean["date"].dt.year
    return clean

def load_test_xlsx(path):
    """Read the test workbook into columns ``date`` / ``y_true`` (percent, clipped to [0, 100])."""
    raw = pd.read_excel(path)
    date_col, value_col = _infer_cols(raw)
    test = raw[[date_col, value_col]].copy()
    test.columns = ["date", "y_true"]
    test["date"] = pd.to_datetime(test["date"], errors="coerce")
    test["y_true"] = pd.to_numeric(test["y_true"], errors="coerce")
    test = test.dropna().sort_values("date").reset_index(drop=True)
    # Values supplied as fractions (0–1 scale) are rescaled to percent.
    if test["y_true"].median() <= 1.5 and test["y_true"].max() <= 1.2:
        test["y_true"] *= 100.0
    test["y_true"] = test["y_true"].clip(0, 100)
    return test

def make_grid(year, start_mmdd, end_mmdd, step_days):
    """Evenly spaced DatetimeIndex covering one year's modeling window."""
    first = pd.Timestamp(year=year, month=start_mmdd[0], day=start_mmdd[1])
    last = pd.Timestamp(year=year, month=end_mmdd[0], day=end_mmdd[1])
    return pd.date_range(first, last, freq=f"{int(step_days)}D")

def _aicc(n, sse, k):
    if n <= k + 1: return np.inf
    return n*np.log(max(sse/n, 1e-12)) + 2*k + (2*k*(k+1))/(n - k - 1)

def fit_poly_reg(x, y, deg, alpha=1e-6):
    """Fit a ridge-regularized polynomial of degree ``deg`` to (x, y).

    The tiny default ``alpha`` keeps the normal equations numerically stable
    while staying essentially equivalent to ordinary least squares.

    Returns:
        (fitted sklearn pipeline, AICc of the in-sample fit)
    """
    features = x.reshape(-1, 1)
    pipeline = Pipeline([
        ("poly", PolynomialFeatures(degree=deg, include_bias=True)),
        ("ridge", Ridge(alpha=alpha, fit_intercept=False, random_state=0)),
    ])
    pipeline.fit(features, y)
    residual_ss = float(np.sum((y - pipeline.predict(features)) ** 2))
    return pipeline, _aicc(len(y), residual_ss, deg + 1)

def per_year_regression_interpolate(df_all, step_days, start_mmdd, end_mmdd, kind="poly"):
    """Resample each year's observations onto an evenly spaced grid via regression.

    For every year in ``df_all`` an evenly spaced grid from ``start_mmdd`` to
    ``end_mmdd`` (``step_days`` apart) is built; the year's observations are
    regressed on day-of-year (LOESS when requested and feasible, otherwise an
    AICc-selected polynomial) and evaluated on that grid.

    Returns a DataFrame with columns ``date``, ``y`` (clipped to [0, 100]) and
    ``year``, sorted by year then date.
    """
    out = []
    for y, g in df_all.groupby("year"):
        start = pd.Timestamp(year=y, month=start_mmdd[0], day=start_mmdd[1])
        end   = pd.Timestamp(year=y, month=end_mmdd[0],   day=end_mmdd[1])
        g_win = g[(g["date"] >= start) & (g["date"] <= end)].copy()
        # Prefer in-window points; fall back to the whole year when the window is too sparse.
        g_use = g_win if len(g_win) >= 2 else g.copy()
        grid = make_grid(y, start_mmdd, end_mmdd, step_days)
        if len(g_use) < 2:
            # Degenerate year: linear interpolation over raw timestamps, or a
            # constant series when only a single observation exists.
            # NOTE(review): since g_use already falls back to the full year,
            # this branch is only reached when len(g) < 2, which makes the
            # interp1d path below look unreachable — confirm intent.
            if len(g) >= 2:
                xi = g["date"].astype("int64").values
                yi = g["y"].values
                try:   f = interp1d(xi, yi, kind="linear",  fill_value="extrapolate")
                except Exception: f = interp1d(xi, yi, kind="nearest", fill_value="extrapolate")
                yg = f(grid.astype("int64").values)
            else:
                yg = np.repeat(g["y"].iloc[0], len(grid))
            out.append(pd.DataFrame({"date": grid, "y": np.clip(yg,0,100), "year": y}))
            continue
        # Regress on day-of-year so every year shares a comparable x-axis.
        x = g_use["date"].dt.dayofyear.values.astype(float)
        yv = g_use["y"].values.astype(float)
        xg = grid.dayofyear.values.astype(float)
        if kind == "loess" and lowess is not None and len(g_use) >= 5:
            sm = lowess(yv, x, frac=LOESS_FRAC, it=0, return_sorted=True)
            f = interp1d(sm[:,0], sm[:,1], kind="linear", fill_value="extrapolate")
            yg = f(xg)
        else:
            # Pick the polynomial degree with the lowest AICc; plain Ridge on
            # the raw x is the last-resort fallback if every fit raises.
            best = (None, np.inf, None)
            for deg in POLY_DEGREE_CANDIDATES:
                try:
                    m, aicc = fit_poly_reg(x, yv, deg)
                    if aicc < best[1]: best = (deg, aicc, m)
                except Exception:
                    continue
            model = best[2] if best[2] is not None else Ridge().fit(x.reshape(-1,1), yv)
            yg = model.predict(xg.reshape(-1,1))
        out.append(pd.DataFrame({"date": grid, "y": np.clip(yg,0,100), "year": y}))
    return pd.concat(out, ignore_index=True).sort_values(["year","date"]).reset_index(drop=True)

def make_features(dates, K=3, base_date=None, use_trend=True):
    """Build the exogenous regressor matrix: intercept + K Fourier pairs (+trend).

    Fourier terms are computed on day-of-year with a 365.25-day period; the
    optional trend column counts years elapsed since ``base_date``.
    """
    idx = pd.DatetimeIndex(pd.to_datetime(dates))
    day_of_year = idx.dayofyear.values.astype(float)
    cols = {"intercept": np.ones_like(day_of_year)}
    for k in range(1, K + 1):
        cols[f"sin_{k}"] = np.sin(2*np.pi*k * day_of_year / 365.25)
        cols[f"cos_{k}"] = np.cos(2*np.pi*k * day_of_year / 365.25)
    if use_trend and base_date is not None:
        cols["trend_years"] = (idx - pd.Timestamp(base_date)).days.values / 365.25
    return pd.DataFrame(cols, index=idx)

def fit_sarimax_with_exog(y, dates, K, base_date, use_trend):
    """Fit SARIMAX(0,1,1) with Fourier (+optional linear trend) exogenous regressors."""
    regressors = make_features(dates, K=K, base_date=base_date, use_trend=use_trend)
    spec = SARIMAX(
        endog=y,
        exog=regressors,
        order=(0, 1, 1),
        enforce_stationarity=False,
        enforce_invertibility=False,
    )
    return spec.fit(disp=False)

def fit_ucm(y, season_len):
    """Fit an Unobserved Components model: local linear trend + stochastic seasonal term."""
    spec = UnobservedComponents(
        endog=y,
        level="local linear trend",
        seasonal=season_len,
        stochastic_level=True,
        stochastic_seasonal=True,
    )
    return spec.fit(disp=False)

def metrics(y_true, y_pred):
    """Return a dict of MSE/RMSE/MAE/MAPE/sMAPE/R2 for two equal-length series."""
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    err = y_pred - y_true
    abs_err = np.abs(err)
    mse = np.mean(err ** 2)
    eps = 1e-6  # guards the percentage-error denominators against zeros
    abs_true = np.abs(y_true)
    mape = np.mean(abs_err / np.maximum(abs_true, eps)) * 100.0
    smape = np.mean(2 * abs_err / np.maximum(np.abs(y_pred) + abs_true, eps)) * 100.0
    ss_res = np.sum(err ** 2)
    ss_tot = np.sum((y_true - y_true.mean()) ** 2)
    # R2 is undefined when the truth series is constant.
    r2 = 1 - ss_res / ss_tot if ss_tot > 0 else np.nan
    return {
        "MSE": mse,
        "RMSE": np.sqrt(mse),
        "MAE": np.mean(abs_err),
        "MAPE(%)": mape,
        "sMAPE(%)": smape,
        "R2": r2,
    }

def smooth_series(y, window=9, poly=2):
    """Savitzky–Golay smoothing with window/poly coerced to valid values.

    Series shorter than 5 points are returned untouched; any failure of the
    filter likewise falls back to the unmodified input.
    """
    n = len(y)
    if n < 5:
        return y
    # Largest odd window not exceeding the request or the series length.
    width = min(window, n - (1 - n % 2))
    width = max(width, 3)
    if width % 2 == 0:
        width -= 1
    order = poly if poly < width else max(1, width - 1)
    try:
        return savgol_filter(y, window_length=width, polyorder=order, mode="interp")
    except Exception:
        return y

# ------------------ Main pipeline ------------------
def main():
    """End-to-end pipeline: clean -> per-year interpolation -> model selection
    -> evenly spaced 2022 forecast -> smoothing -> PCHIP at test dates -> metrics."""
    # 1) Load the full data set and clean it
    df_all = load_and_clean_all(INPUT_XLSX, SHEET_NAME)
    df_all.to_csv("./output_p1/cleaned_all.csv", index=False, encoding="utf-8-sig")

    # 2) Training set: every year before the test year (2022)
    df_train_all = df_all[df_all["year"] < TEST_YEAR].copy()
    if df_train_all.empty:
        raise RuntimeError("训练集为空，请检查年份。")

    # 3) Per-year regression -> evenly spaced training series (every 4 days)
    step_days = int(RESOLUTION_DAYS)
    interped = per_year_regression_interpolate(df_train_all, step_days, WINDOW_START, WINDOW_END, kind=REGRESSION_KIND)
    interped.to_csv(f"./output_p1/interpolated_train_{REGRESSION_KIND}_every{step_days}d.csv",
                    index=False, encoding="utf-8-sig")

    train = interped.sort_values("date").reset_index(drop=True)
    y_train = train["y"].values
    d_train = pd.DatetimeIndex(train["date"])
    base_date = d_train.min()
    # One season = number of grid points inside a single year's window.
    season_len = len(make_grid(TEST_YEAR-1, WINDOW_START, WINDOW_END, step_days))

    # 4) Fit both univariate candidates and keep the one with the lower AIC
    picked = None
    try:
        res_a = fit_sarimax_with_exog(y_train, d_train, K=FOURIER_K, base_date=base_date, use_trend=USE_TIME_TREND)
        aic_a = res_a.aic
        picked = ("SARIMAX_Fourier+Trend", res_a, aic_a)
        print(f"[Info] SARIMAX_Fourier+Trend AIC={aic_a:.2f}")
    except Exception as e:
        print(f"[Warn] SARIMAX_Fourier+Trend 失败: {e}")

    try:
        res_b = fit_ucm(y_train, season_len)
        aic_b = res_b.aic
        print(f"[Info] UCM(LLT+Seasonal) AIC={aic_b:.2f}")
        if picked is None or aic_b < picked[2]:
            picked = ("UCM", res_b, aic_b)
    except Exception as e:
        print(f"[Warn] UCM 失败: {e}")

    if picked is None:
        raise RuntimeError("模型拟合失败。")
    model_name, res, _ = picked
    print(f"[Info] 采用模型：{model_name}")

    # 5) -- Forecast 2022 on the evenly spaced grid first (every 4 days) -- #
    grid2022 = make_grid(TEST_YEAR, WINDOW_START, WINDOW_END, step_days)
    if model_name == "SARIMAX_Fourier+Trend":
        # SARIMAX needs future exogenous regressors for the forecast horizon.
        X_fut = make_features(grid2022, K=FOURIER_K, base_date=base_date, use_trend=USE_TIME_TREND)
        fc = res.get_forecast(steps=len(grid2022), exog=X_fut)
    else:
        fc = res.get_forecast(steps=len(grid2022))
    yhat_grid = np.clip(np.asarray(fc.predicted_mean), 0, 100)

    # 6) Smooth the evenly spaced forecast series (optional)
    if SMOOTH:
        yhat_grid = smooth_series(yhat_grid, window=SMOOTH_WINDOW, poly=SMOOTH_POLY)
    if SMOOTH_CLIP_01:
        yhat_grid = np.clip(yhat_grid, 0, 100)

    # 7) Read test.xlsx and take values at its dates via PCHIP
    test_df = load_test_xlsx(TEST_XLSX)
    # Restrict the test points to the modeling window
    st = pd.Timestamp(year=TEST_YEAR, month=WINDOW_START[0], day=WINDOW_START[1])
    ed = pd.Timestamp(year=TEST_YEAR, month=WINDOW_END[0],   day=WINDOW_END[1])
    test_df = test_df[(test_df["date"] >= st) & (test_df["date"] <= ed)].copy()
    if test_df.empty:
        raise RuntimeError("测试点不在 3/1–7/1 窗口内，或为空。")

    # PCHIP interpolation onto the irregular test dates.
    # BUGFIX: ``.view("int64")`` on DatetimeIndex/Series is deprecated (removed
    # in pandas 3.0); ``.astype("int64")`` yields the same ns-since-epoch ints.
    x_grid = grid2022.astype("int64").to_numpy(dtype=float)
    x_test = test_df["date"].astype("int64").to_numpy(dtype=float)
    try:
        f_pchip = PchipInterpolator(x_grid, yhat_grid, extrapolate=True)
        y_pred = f_pchip(x_test)
    except Exception:
        f_lin = interp1d(x_grid, yhat_grid, kind="linear", fill_value="extrapolate")
        y_pred = f_lin(x_test)
    y_true = test_df["y_true"].values

    # 8) Metrics & exports
    mets = metrics(y_true, y_pred)
    out = pd.DataFrame({
        "date": test_df["date"],
        "y_true(%)": y_true,
        "y_pred(%)": y_pred,
        "abs_err": np.abs(y_pred - y_true)
    })
    out.to_csv("./output_p1/pred_eval_2022_grid_interp.csv", index=False, encoding="utf-8-sig")
    pd.DataFrame({"date": grid2022, "yhat_grid(%)": yhat_grid}).to_csv(
        "./output_p1/pred_2022_grid_every4d.csv", index=False, encoding="utf-8-sig"
    )

    print("\n=== Evaluation on 2022 test points (grid→smooth→interp) ===")
    for k, v in mets.items():
        print(f"{k}: {v:.6f}")
    print("\n输出：")
    print(" - 等距预测：./output_p1/pred_2022_grid_every4d.csv")
    print(" - 测试集评估：./output_p1/pred_eval_2022_grid_interp.csv")

if __name__ == "__main__":
    main()
