# -*- coding: utf-8 -*-
"""
定量分析：GDP 与 三次产业占比 vs 就业率（季度，YoY）
- 先同频对齐：就业率(日/半月) → 季度均值
- 变换：GDP 用对数同比（log-diff 4期）；占比 & 就业率用同比差分（diff 4期）
- 输出：
  1) corr_gdp_shares_yoy.csv           # Pearson/Spearman + p值 + FDR(q) + 样本数
  2) partial_corr_gdp_shares_yoy.csv   # 偏相关（控制其它变量）
  3) ols_hac_gdp_shares_yoy.csv        # 多变量回归（HAC稳健SE）
  4) granger_gdp_shares_yoy.csv        # Granger p最小值 & 对应滞后
  5) oos_arx_gdp_shares_yoy.csv        # OOS：AR(1) vs AR(1)+X 的RMSE对比（ΔRMSE）
"""

import os, warnings
import numpy as np
import pandas as pd
import statsmodels.api as sm
from scipy.stats import pearsonr, spearmanr
from statsmodels.stats.multitest import multipletests
from statsmodels.tsa.stattools import adfuller, grangercausalitytests

# ================= Configuration =================
PATH_EMP = "/Users/linshangjin/25CCM/NKU-C/dataset_csv/data_processed.xlsx"   # employment-rate predictions (multi-sheet workbook: date, employment_rate)
PATH_GDP = "/Users/linshangjin/25CCM/NKU-C/dataset_csv/GDP_季度数据.csv"       # quarterly GDP table (GDP + primary/secondary/tertiary industry shares)
OUT_DIR  = "/Users/linshangjin/25CCM/NKU-C/t2/三次产业/定性"                       # output directory for result CSVs
FREQ     = "Q"                               # quarterly resampling frequency
LAG_YOY  = 4                                 # year-over-year lag in quarters
MAX_LAG_GR = 6                               # maximum Granger lag (quarters)
OOS_START_MIN = 12                           # minimum training length (quarters) before OOS forecasting starts

os.makedirs(OUT_DIR, exist_ok=True)
warnings.filterwarnings("ignore", category=FutureWarning)

# ================= 工具函数 =================
def read_employment_xlsx(path):
    """Read every sheet of the employment workbook and stack them into a
    single datetime-indexed frame with one 'employment_rate' column.

    Raises ValueError when a sheet is missing the required columns.
    """
    book = pd.ExcelFile(path)
    frames = []
    for sheet in book.sheet_names:
        raw = book.parse(sheet)
        if not {"date", "employment_rate"}.issubset(raw.columns):
            raise ValueError(f"{sheet} 需含列 ['date','employment_rate']，实际：{list(raw.columns)}")
        piece = raw.loc[:, ["date", "employment_rate"]].copy()
        piece["date"] = pd.to_datetime(piece["date"])
        piece["employment_rate"] = pd.to_numeric(piece["employment_rate"], errors="coerce")
        frames.append(piece)
    stacked = pd.concat(frames, ignore_index=True).dropna(subset=["date"]).sort_values("date")
    return stacked.set_index("date").sort_index()

def read_gdp_csv(path):
    """Load the quarterly GDP CSV and return it indexed by datetime.

    Accepts either an explicit 'date' column or a pandas-style
    'Unnamed: 0' index column; every other column is coerced to numeric.
    Raises ValueError when no usable date column exists.
    """
    df = pd.read_csv(path, encoding="utf-8")
    if "date" not in df.columns:
        if "Unnamed: 0" not in df.columns:
            raise ValueError("GDP CSV 缺少日期列（date 或 Unnamed: 0）")
        df = df.rename(columns={"Unnamed: 0": "date"})
    df["date"] = pd.to_datetime(df["date"])
    value_cols = [c for c in df.columns if c != "date"]
    for c in value_cols:
        df[c] = pd.to_numeric(df[c], errors="coerce")
    return df.set_index("date").sort_index()

def yoy_log(s, k=4):
    """Year-over-year log growth: log(s_t) - log(s_{t-k})."""
    logged = np.log(s)
    return logged - logged.shift(k)

def yoy_diff(s, k=4):
    """Year-over-year change: s_t - s_{t-k}."""
    return s - s.shift(k)

def corr_with_p(a, b, method="pearson"):
    """Correlation with p-value on the overlapping non-NaN sample.

    Returns (r, p, n_obs); (nan, nan, n_obs) when fewer than 8 joint
    observations are available.
    """
    common = a.dropna().index.intersection(b.dropna().index)
    n_obs = len(common)
    if n_obs < 8:
        return np.nan, np.nan, n_obs
    corr_fn = pearsonr if method == "pearson" else spearmanr
    r, p = corr_fn(a.loc[common], b.loc[common])
    return float(r), float(p), n_obs

def partial_corr(y, X, target):
    """Partial correlation by the residual-regression method:
    corr(resid of y on the other columns, resid of target on the other columns).

    Returns NaN when target is missing from X or fewer than 8 joint
    observations remain after dropping NaNs.
    """
    if (target is None) or (target not in X.columns):
        return np.nan
    others = [c for c in X.columns if c != target]
    data = pd.concat([y.rename("Y"), X[[target] + others]], axis=1).dropna()
    if data.shape[0] < 8:
        return np.nan
    if others:
        controls = sm.add_constant(data[others])
        res_y = sm.OLS(data["Y"], controls).fit().resid
        res_x = sm.OLS(data[target], controls).fit().resid
    else:
        # Nothing to control for: fall back to the raw series.
        res_y, res_x = data["Y"], data[target]
    return float(np.corrcoef(res_y, res_x)[0, 1])

def granger_min_p(x, y, maxlag=6):
    """Granger test of x -> y; returns (best lag, minimal ssr-F p-value).

    Inputs are assumed (near-)stationary. Returns (nan, nan) when the joint
    sample is too short or the test fails.
    """
    pair = pd.concat([y.rename("Y"), x.rename("X")], axis=1).dropna()
    if len(pair) <= maxlag + 6:
        return np.nan, np.nan
    try:
        res = grangercausalitytests(pair[["Y", "X"]], maxlag=maxlag, verbose=False)
        best_lag, p_min = None, None
        for lag in res:
            p = res[lag][0]["ssr_ftest"][1]
            if p_min is None or p < p_min:
                best_lag, p_min = lag, p
        return best_lag, float(p_min)
    except Exception:
        # Numerical failure (e.g. collinearity) -> treat as "no result".
        return np.nan, np.nan

def expanding_oos_arx(y, x, start_min=12):
    """One-step-ahead expanding-window OOS comparison.

    Baseline AR(1): Y_t ~ const + Y_{t-1}
    Augmented ARX : Y_t ~ const + Y_{t-1} + X_t

    Returns (rmse_baseline, rmse_with_X, rmse_baseline - rmse_with_X);
    a positive delta means X improves the one-step forecast. Returns
    (nan, nan, nan) when the joint sample is too short or fewer than
    8 valid forecasts are produced.
    """
    df_ = pd.concat([y.rename("Y"), x.rename("X")], axis=1).dropna()
    if len(df_) < start_min + 8:  # too short for a meaningful OOS split
        return np.nan, np.nan, np.nan
    preds_b, preds_x, actuals = [], [], []
    for t in range(start_min, len(df_)):
        # Single training frame shared by both models (the original built
        # two identical copies).
        train = df_.iloc[:t].copy()
        train["Y_l1"] = train["Y"].shift(1)
        train = train.dropna()
        if len(train) < 8:  # not enough usable training rows at this step
            preds_b.append(np.nan); preds_x.append(np.nan); actuals.append(np.nan)
            continue
        y_l1 = df_.iloc[t - 1]["Y"]  # latest observed Y, the AR(1) regressor
        x_t = df_.iloc[t]["X"]       # contemporaneous X for the ARX forecast
        # BUG FIX: the original predicted via
        # m.predict(sm.add_constant(pd.DataFrame({...one row...}))).
        # sm.add_constant() with its default has_constant='skip' treats every
        # column of a 1-row frame as "already constant" and adds NO const
        # column, so predict() received one fewer regressor than the fitted
        # params and raised a shape error. Build forecasts from the fitted
        # parameters directly instead.
        mb = sm.OLS(train["Y"], sm.add_constant(train[["Y_l1"]])).fit()
        pred_b = float(mb.params["const"] + mb.params["Y_l1"] * y_l1)
        mx = sm.OLS(train["Y"], sm.add_constant(train[["Y_l1", "X"]])).fit()
        pred_x = float(mx.params["const"] + mx.params["Y_l1"] * y_l1 + mx.params["X"] * x_t)
        preds_b.append(pred_b); preds_x.append(pred_x); actuals.append(df_.iloc[t]["Y"])
    preds_b, preds_x, actuals = (np.asarray(z, dtype=float) for z in (preds_b, preds_x, actuals))
    ok = np.isfinite(preds_b) & np.isfinite(preds_x) & np.isfinite(actuals)
    if ok.sum() < 8:  # too few valid forecasts to score reliably
        return np.nan, np.nan, np.nan
    rmse_b = float(np.sqrt(np.mean((actuals[ok] - preds_b[ok]) ** 2)))
    rmse_x = float(np.sqrt(np.mean((actuals[ok] - preds_x[ok]) ** 2)))
    return rmse_b, rmse_x, (rmse_b - rmse_x)

# ================= Load data and align frequencies =================
emp = read_employment_xlsx(PATH_EMP)                       # daily / semi-monthly employment rate
y_q = emp["employment_rate"].resample(FREQ).mean().rename("y_pred_q").dropna()  # quarterly mean

gdp = read_gdp_csv(PATH_GDP)                               # quarterly GDP data
# Keep only the GDP / industry-share columns that actually exist in the table.
keep_cols = [c for c in ["GDP","第一产业占比","第二产业占比","第三产业占比"] if c in gdp.columns]
gdp = gdp[keep_cols].resample(FREQ).last()

joint = gdp.join(y_q, how="inner")                         # align on common quarters

# ================= Year-over-year transforms =================
# GDP: 4-quarter log-difference; shares & employment rate: plain 4-quarter difference.
GDP_yoy  = yoy_log(joint["GDP"], LAG_YOY).rename("GDP_YoY")
S1_yoy   = yoy_diff(joint["第一产业占比"], LAG_YOY).rename("第一产业占比_YoY")
S2_yoy   = yoy_diff(joint["第二产业占比"], LAG_YOY).rename("第二产业占比_YoY")
S3_yoy   = yoy_diff(joint["第三产业占比"], LAG_YOY).rename("第三产业占比_YoY")
Y_yoy    = yoy_diff(joint["y_pred_q"], LAG_YOY).rename("就业率_YoY")

# Combine into one aligned DataFrame
DF = pd.concat([Y_yoy, GDP_yoy, S1_yoy, S2_yoy, S3_yoy], axis=1)

# ================= 1) Correlations (Pearson/Spearman + p + FDR) =================
results = []
for var in ["GDP_YoY","第一产业占比_YoY","第二产业占比_YoY","第三产业占比_YoY"]:
    rP, pP, n = corr_with_p(DF[var], DF["就业率_YoY"], method="pearson")
    rS, pS, _ = corr_with_p(DF[var], DF["就业率_YoY"], method="spearman")
    results.append({"variable": var, "pearson": rP, "pearson_p": pP,
                    "spearman": rS, "spearman_p": pS, "n_obs": n})
corr_tab = pd.DataFrame(results).sort_values("pearson", ascending=False)
mask = corr_tab["pearson_p"].notna()
if mask.any():
    # Benjamini-Hochberg FDR across the available Pearson p-values.
    _, qvals, *_ = multipletests(corr_tab.loc[mask,"pearson_p"], method="fdr_bh")
    corr_tab.loc[mask,"pearson_q"] = qvals
corr_tab.to_csv(os.path.join(OUT_DIR, "corr_gdp_shares_yoy.csv"), index=False)

# ================= 2) Partial correlations (controlling for the others) =================
# NOTE: the three industry shares sum to 100%, i.e. exact collinearity, which
# would make partial correlation / regression blow up. Fix: drop one share
# (here the tertiary-industry share) from the conditioning set.
X_pc = DF[["GDP_YoY","第一产业占比_YoY","第二产业占比_YoY"]].copy()
pc_rows = []
for var in ["GDP_YoY","第一产业占比_YoY","第二产业占比_YoY"]:
    pc = partial_corr(DF["就业率_YoY"], X_pc, target=var)
    pc_rows.append({"variable": var, "partial_corr_yoy": pc})
pd.DataFrame(pc_rows).to_csv(os.path.join(OUT_DIR, "partial_corr_gdp_shares_yoy.csv"), index=False)

# ================= 3) Multivariate regression (HAC robust standard errors) =================
# 就业率_YoY ~ GDP_YoY + 第一产业占比_YoY + 第二产业占比_YoY
# (tertiary share dropped to avoid the exact collinearity of the three shares)
reg = DF[["就业率_YoY","GDP_YoY","第一产业占比_YoY","第二产业占比_YoY"]].dropna().copy()
reg["const"] = 1.0
model = sm.OLS(reg["就业率_YoY"], reg[["const","GDP_YoY","第一产业占比_YoY","第二产业占比_YoY"]]).fit(
    cov_type="HAC", cov_kwds={"maxlags":4}  # Newey-West with 4 lags: conservative for quarterly data
)
coef = model.params
se   = model.bse
tval = model.tvalues
pval = model.pvalues
ols_tab = pd.DataFrame({
    "param": coef.index,
    "coef": coef.values,
    "std_err(HAC)": se.values,
    "t": tval.values,
    "p": pval.values
})
ols_tab.to_csv(os.path.join(OUT_DIR, "ols_hac_gdp_shares_yoy.csv"), index=False)

# ================= 4) Granger causality (YoY) =================
# YoY-transformed series are usually close to stationary already, so no extra
# differencing is applied here (stationarity could be spot-checked with ADF).
gr_rows = []
for var in ["GDP_YoY","第一产业占比_YoY","第二产业占比_YoY","第三产业占比_YoY"]:
    bestlag, pmin = granger_min_p(DF[var], DF["就业率_YoY"], maxlag=MAX_LAG_GR)
    gr_rows.append({"variable": var, "best_lag_q": bestlag, "pmin": pmin})
pd.DataFrame(gr_rows).to_csv(os.path.join(OUT_DIR, "granger_gdp_shares_yoy.csv"), index=False)

# ================= 5) OOS incremental forecasting (AR(1) vs AR(1)+X) =================
# Positive rmse_improvement = adding X lowers one-step-ahead forecast RMSE.
oos_rows = []
for var in ["GDP_YoY","第一产业占比_YoY","第二产业占比_YoY","第三产业占比_YoY"]:
    rmse_b, rmse_x, delta = expanding_oos_arx(DF["就业率_YoY"], DF[var], start_min=OOS_START_MIN)
    oos_rows.append({"variable": var, "rmse_baseline": rmse_b, "rmse_with_X": rmse_x, "rmse_improvement": delta})
pd.DataFrame(oos_rows).to_csv(os.path.join(OUT_DIR, "oos_arx_gdp_shares_yoy.csv"), index=False)

# Final summary: list the result files that were written.
print("✅ Done. 输出文件：")
print(" -", os.path.join(OUT_DIR, "corr_gdp_shares_yoy.csv"))
print(" -", os.path.join(OUT_DIR, "partial_corr_gdp_shares_yoy.csv"))
print(" -", os.path.join(OUT_DIR, "ols_hac_gdp_shares_yoy.csv"))
print(" -", os.path.join(OUT_DIR, "granger_gdp_shares_yoy.csv"))
print(" -", os.path.join(OUT_DIR, "oos_arx_gdp_shares_yoy.csv"))
