# -*- coding: utf-8 -*-
# Revised version: per-year regression interpolation + global minimum time step;
# also fixes the earlier `dayofyear` error.
INPUT_XLSX = "P1sortdata.xlsx"   # input workbook file name
SHEET_NAME = 0

# To pin the column names manually, set them here; leave as None to auto-detect.
DATE_COL_FIXED  = None
VALUE_COL_FIXED = None

OUTPUT_DIR = "./output_p1"
RESOLUTION_DAYS = "auto"      # "auto" = use the global minimum positive gap; or an integer (e.g. 3)
TARGET_START_MMDD = (3, 1)    # model/interpolate/forecast only the 3/1 - 7/1 window
TARGET_END_MMDD   = (7, 1)

import os, warnings, math
import numpy as np
import pandas as pd
from datetime import timedelta
warnings.filterwarnings("ignore")

# Regression and time-series modeling
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from statsmodels.tsa.statespace.sarimax import SARIMAX

from scipy.interpolate import interp1d

os.makedirs(OUTPUT_DIR, exist_ok=True)

# =========== 工具函数 ===========
def _infer_cols(df: pd.DataFrame):
    """Infer the (date_column, value_column) names of *df*.

    Returns the fixed names immediately when both ``DATE_COL_FIXED`` and
    ``VALUE_COL_FIXED`` are set.  Otherwise auto-detects:

    * date column: first matching name candidate; failing that, the first
      non-numeric column whose values parse as datetimes,
    * value column: a numeric-enough column (>= 30% parseable, at least 5
      values), preferring one whose name looks like a rate/value field.

    Raises:
        ValueError: if either column cannot be identified.
    """
    if DATE_COL_FIXED and VALUE_COL_FIXED:
        return DATE_COL_FIXED, VALUE_COL_FIXED

    date_candidates  = ["日期","date","时间","Date","time","Time"]
    value_candidates = ["就业率","就业率(%)","就业率（%）","y","value","rate"]

    # Check name candidates FIRST: pd.to_datetime(errors="raise") happily
    # "parses" a pure-numeric value column as epoch timestamps, which used
    # to misidentify a numeric column as the date column.
    date_col = next((c for c in date_candidates if c in df.columns), None)
    if date_col is None:
        for c in df.columns:
            if pd.api.types.is_numeric_dtype(df[c]):
                continue  # never treat numeric columns as dates
            try:
                pd.to_datetime(df[c], errors="raise")
                date_col = c
                break
            except Exception:
                continue

    # Value column: prefer a keyword-named numeric column, else keep the
    # FIRST numeric column (previously the last one won by accident).
    val_col = None
    for c in df.columns:
        if c == date_col:
            continue
        s = pd.to_numeric(df[c], errors="coerce")
        if s.notna().sum() >= max(5, int(0.3 * len(s))):
            if any(k in str(c) for k in ["就", "rate", "value", "%"]):
                val_col = c
                break
            if val_col is None:
                val_col = c
    if val_col is None:
        val_col = next((c for c in value_candidates if c in df.columns), None)

    if date_col is None or val_col is None:
        raise ValueError(f"无法识别日期/数值列，请手动设置。列: {df.columns.tolist()}")
    return date_col, val_col

def load_and_clean(path, sheet=0):
    """Read the Excel workbook and normalize it to columns (date, y, year).

    Values are coerced to the [0, 100] percentage scale (fractions in
    [0, 1] are multiplied by 100), duplicated dates dropped, rows sorted
    by date.

    Returns:
        (df, global_min_gap): the cleaned frame and the smallest positive
        within-year gap in days (3 when no positive gap exists).
    """
    raw = pd.read_excel(path, sheet_name=sheet)
    dcol, vcol = _infer_cols(raw)

    df = raw[[dcol, vcol]].rename(columns={dcol: "date", vcol: "y"})
    df["date"] = pd.to_datetime(df["date"], errors="coerce")
    df["y"] = pd.to_numeric(df["y"], errors="coerce")
    df = (df.dropna()
            .drop_duplicates(subset=["date"])
            .sort_values("date")
            .reset_index(drop=True))

    # Normalize fraction-scaled data (0-1) to percentages.
    if df["y"].median() <= 1.5 and df["y"].max() <= 1.2:
        df["y"] = df["y"] * 100.0
    df["y"] = df["y"].clip(0, 100)
    df["year"] = df["date"].dt.year

    # Smallest positive day gap observed within any single year.
    gaps = []
    for _, grp in df.groupby("year"):
        diffs = grp["date"].sort_values().diff().dt.days
        positive = diffs[diffs > 0]
        if len(positive):
            gaps.append(int(positive.min()))
    return df, (min(gaps) if gaps else 3)

def make_target_grid(year, start_mmdd, end_mmdd, step_days):
    """Equally spaced DatetimeIndex inside *year*, every *step_days* days.

    *start_mmdd* / *end_mmdd* are (month, day) tuples; the end date is
    included only when it falls exactly on the grid.
    """
    start_month, start_day = start_mmdd
    end_month, end_day = end_mmdd
    first = pd.Timestamp(year=year, month=start_month, day=start_day)
    last = pd.Timestamp(year=year, month=end_month, day=end_day)
    return pd.date_range(first, last, freq=f"{int(step_days)}D")

def _aicc(n, sse, k):
    # AICc = n*ln(SSE/n) + 2k + 2k(k+1)/(n-k-1)
    if n <= k + 1: 
        return np.inf
    return n*np.log(max(sse/n, 1e-12)) + 2*k + (2*k*(k+1))/(n - k - 1)

def fit_poly_reg(x, y, deg, alpha=1e-6):
    """Fit a ridge-regularized polynomial of degree *deg* to (x, y).

    Returns:
        (model, aicc): the fitted sklearn pipeline and its AICc score,
        counting deg+1 parameters (coefficients incl. the constant term).
    """
    features = x.reshape(-1, 1)
    pipeline = Pipeline([
        ("poly", PolynomialFeatures(degree=deg, include_bias=True)),
        ("ridge", Ridge(alpha=alpha, fit_intercept=False, random_state=0)),
    ])
    pipeline.fit(features, y)
    residual_sq = float(np.sum((y - pipeline.predict(features)) ** 2))
    n_params = deg + 1  # includes the constant term
    return pipeline, _aicc(len(y), residual_sq, n_params)

def per_year_regression_interpolate(df, step_days):
    """Per-year regression "interpolation" onto a uniform date grid.

    For each year, fit a polynomial of y against day-of-year (degree
    1/2/3, chosen by AICc) — preferring points inside the 3/1-7/1 target
    window, falling back to the whole year — and evaluate it on an
    equally spaced grid every *step_days* days.  Years with fewer than
    two usable points fall back to linear/nearest interpolation, or a
    constant when only one point exists.

    Returns:
        DataFrame with columns (date, y, year); y clipped to [0, 100].

    Raises:
        RuntimeError: if no year produced any output.
    """
    out = []
    for y, g in df.groupby("year"):
        # Prefer points inside this year's target window.
        start = pd.Timestamp(year=y, month=TARGET_START_MMDD[0], day=TARGET_START_MMDD[1])
        end   = pd.Timestamp(year=y, month=TARGET_END_MMDD[0],   day=TARGET_END_MMDD[1])
        g_win = g[(g["date"] >= start) & (g["date"] <= end)].copy()
        g_use = g_win if len(g_win) >= 2 else g.copy()  # fall back to the whole year

        # Still fewer than 2 points: linear / nearest-neighbour fallback.
        if len(g_use) < 2:
            grid = make_target_grid(y, TARGET_START_MMDD, TARGET_END_MMDD, step_days)
            if len(g) >= 2:
                xi = g["date"].astype("int64").values
                yi = g["y"].values
                try:
                    f = interp1d(xi, yi, kind="linear", fill_value="extrapolate")
                except Exception:
                    f = interp1d(xi, yi, kind="nearest", fill_value="extrapolate")
                yg = pd.Series(f(grid.astype("int64").values), index=grid)
            else:
                # Single point: fill the whole grid with that value.
                yg = pd.Series(np.repeat(g["y"].iloc[0], len(grid)), index=grid)
            yg = yg.clip(0, 100)
            out.append(pd.DataFrame({"date": yg.index, "y": yg.values, "year": y}))
            continue

        # Regress on day-of-year within the year.
        x = g_use["date"].dt.dayofyear.values.astype(float)
        yv = g_use["y"].values.astype(float)

        # Try degrees 1/2/3 and keep the AICc-best model.
        best_model, best_aicc = None, np.inf
        for deg in (1, 2, 3):
            try:
                model, aicc = fit_poly_reg(x, yv, deg)
                if aicc < best_aicc:
                    best_model, best_aicc = model, aicc
            except Exception:
                continue
        if best_model is None:
            # Last-resort fallback: a plain linear ridge fit.  (Previously
            # this fallback was fitted but immediately overwritten with
            # None, crashing on .predict; fixed to actually use it.)
            best_model = Ridge().fit(x.reshape(-1, 1), yv)

        grid = make_target_grid(y, TARGET_START_MMDD, TARGET_END_MMDD, step_days)
        xg = grid.dayofyear.values.astype(float)
        yg = np.clip(best_model.predict(xg.reshape(-1, 1)), 0, 100)

        out.append(pd.DataFrame({"date": grid, "y": yg, "year": y}))
    if not out:
        raise RuntimeError("逐年回归插值后为空，请检查数据。")
    return pd.concat(out, ignore_index=True).sort_values(["year","date"]).reset_index(drop=True)

def make_fourier_terms(dates, K=3, period=365.25):
    """Yearly Fourier features (sin_k / cos_k, k=1..K) from day-of-year.

    Accepts a Series, DatetimeIndex or anything ``pd.to_datetime`` can
    parse; the returned DataFrame is indexed by the input timestamps.
    """
    parsed = pd.to_datetime(dates)
    if isinstance(parsed, pd.Series):
        doy = parsed.dt.dayofyear.values.astype(float)
        index = parsed.values
    else:
        parsed = pd.DatetimeIndex(parsed)
        doy = parsed.dayofyear.values.astype(float)
        index = parsed
    cols = {}
    for k in range(1, K + 1):
        cols[f"sin_{k}"] = np.sin(2*np.pi*k * doy / period)
        cols[f"cos_{k}"] = np.cos(2*np.pi*k * doy / period)
    return pd.DataFrame(cols, index=index)

def select_arima_order(y, exog, orders=((0,1,0),(1,1,0),(0,1,1),(1,1,1),(2,1,1))):
    """Small AIC grid-search over candidate ARIMA orders.

    Returns:
        (order, results): the best (p, d, q) and its fitted SARIMAX
        results.  Falls back to a (0, 1, 0) fit when every candidate
        raises during fitting.
    """
    def _fit(order):
        return SARIMAX(y, order=order, exog=exog,
                       enforce_stationarity=False,
                       enforce_invertibility=False).fit(disp=False)

    chosen = None  # (aic, order, results)
    for p, d, q in orders:
        try:
            fitted = _fit((p, d, q))
        except Exception:
            continue
        if chosen is None or fitted.aic < chosen[0]:
            chosen = (fitted.aic, (p, d, q), fitted)

    if chosen is None:
        return (0, 1, 0), _fit((0, 1, 0))
    return chosen[1], chosen[2]

def train_and_forecast(interped_df):
    """Fit SARIMAX with yearly Fourier exogenous terms and forecast
    the 3/1-7/1 window for 2025 and 2026.

    Trains only on rows dated before 2025.  Relies on the module-global
    ``step_days`` (set in main()) for the forecast grid spacing.  Writes
    one CSV per forecast year into OUTPUT_DIR and returns the
    concatenated forecast frame (columns: date, 就业率（%）, year).

    Raises:
        RuntimeError: if fewer than 10 training rows are available.
    """
    data = interped_df.sort_values("date").reset_index(drop=True)

    # Training set: historical years only (< 2025).
    train = data[data["date"].dt.year < 2025].copy()
    if len(train) < 10:
        raise RuntimeError("训练样本太少，无法建模。请检查数据或扩大年份范围。")

    X_train = make_fourier_terms(train["date"], K=3)
    y_train = train["y"].values
    order, res = select_arima_order(y_train, X_train)
    print(f"[Info] ARIMA 选阶: {order}, AIC={res.aic:.2f}")

    # NOTE: a dead inner helper (make_range) that computed a step from the
    # training-data median gap but never used it was removed; the grid
    # spacing is intentionally the interpolation resolution `step_days`.
    out_all = []
    for year in (2025, 2026):
        idx = make_target_grid(year, TARGET_START_MMDD, TARGET_END_MMDD, step_days)
        X_fut = make_fourier_terms(pd.Series(idx), K=3)
        fc = res.get_forecast(steps=len(idx), exog=X_fut)
        yhat = np.clip(fc.predicted_mean.values, 0, 100)
        out = pd.DataFrame({"date": idx, "就业率（%）": yhat, "year": year})
        out_all.append(out)
        out.to_csv(os.path.join(OUTPUT_DIR, f"pred_{year}_0301_0701_every{int(step_days)}d.csv"),
                   index=False, encoding="utf-8-sig")
    return pd.concat(out_all, ignore_index=True)

# =========== 主流程 ===========
def main():
    """Run the full pipeline: clean the data, interpolate each year's
    3/1-7/1 window onto a uniform grid, then forecast 2025/2026."""
    global step_days

    cleaned, min_gap = load_and_clean(INPUT_XLSX, SHEET_NAME)
    # Resolve the grid resolution: auto = global minimum positive gap.
    step_days = min_gap if RESOLUTION_DAYS == "auto" else int(RESOLUTION_DAYS)
    step_days = max(1, int(step_days))
    print(f"[Info] 全局最小正间隔 = {min_gap} 天 -> 统一分辨率 step_days = {step_days} 天")

    cleaned.to_csv(os.path.join(OUTPUT_DIR, "cleaned_all.csv"),
                   index=False, encoding="utf-8-sig")

    grid_df = per_year_regression_interpolate(cleaned, step_days)
    grid_df[["date","y","year"]].to_csv(
        os.path.join(OUTPUT_DIR, f"interpolated_regression_0301_0701_every{step_days}d.csv"),
        index=False, encoding="utf-8-sig",
    )
    print(f"[Info] 已输出逐年回归插值后的 3/1~7/1 等间隔序列。")

    forecasts = train_and_forecast(grid_df)
    forecasts.to_csv(
        os.path.join(OUTPUT_DIR, f"pred_2025_2026_0301_0701_every{step_days}d_all.csv"),
        index=False, encoding="utf-8-sig",
    )
    print("[Done] 2025/2026 3/1~7/1 预测已导出。")

# Script entry point.
if __name__ == "__main__":
    main()
