
# -*- coding: utf-8 -*-
"""
综合相关性分析：多变量（去除 year） vs 就业率
------------------------------------------------
输入：
  - employ_rate.csv   （含日期列与就业率列，如：date / 时间，employment_rate / 就业率）
  - all_vars_yoy.csv  （含日期列与各类因子，建议为同比或已同尺度）
功能：
  1) 智能识别并对齐日期（默认内连接；可选最近匹配/按月季聚合/外连接插值）
  2) 过滤：仅保留数值列 + 自动去除名为/包含 'year'（含中文“年份”）的变量
  3) 计算：
     - Pearson / Spearman 相关 + p 值 + FDR(q) + 样本数
     - 偏相关（残差法，单变量对 Y 的“净相关”，控制全部其它因子）
  4) 作图（论文友好）：
     - 全矩阵热力图（Pearson / Spearman，带数值标注）
     - 与 Y 的相关发散条形图（Pearson / Spearman）
     - 与 Y 的偏相关发散条形图
     - Top-K 散点 + 回归线（按 |Pearson with Y| 排序）

用法示例：
  python corr_vars_yoy_all.py \
    --emp /mnt/data/employ_rate.csv \
    --vars /mnt/data/all_vars_yoy.csv \
    --outdir output_vars_corr \
    --align inner \
    --max_top 6 \
    --datecol_emp 时间 --datecol_vars 日期 --ycol 就业率
"""
import os, argparse, math, warnings, re
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import pearsonr, spearmanr
from statsmodels.stats.multitest import multipletests
import statsmodels.api as sm

warnings.filterwarnings("ignore", category=FutureWarning)

# -------------------- Matplotlib style & CJK font setup --------------------
def setup_matplotlib():
    """Configure Matplotlib for CJK text and publication-quality figures.

    Picks the first installed font from a preference list of Chinese-capable
    fonts and prepends it to the sans-serif fallback chain, then applies a
    shared rcParams style (figure size, dpi, spines, grid, font sizes).
    """
    preferred = ("PingFang SC", "Microsoft YaHei", "SimHei", "STSong",
                 "Heiti TC", "Arial Unicode MS", "WenQuanYi Zen Hei",
                 "WenQuanYi Micro Hei")
    installed = {font.name for font in matplotlib.font_manager.fontManager.ttflist}
    chosen = next((name for name in preferred if name in installed), None)
    if chosen is not None:
        plt.rcParams["font.sans-serif"] = [chosen] + plt.rcParams.get("font.sans-serif", [])
    # Render the minus sign correctly when a CJK font is active.
    plt.rcParams["axes.unicode_minus"] = False
    plt.rcParams.update({
        "figure.figsize": (9, 5.4),
        "figure.dpi": 150,
        "savefig.dpi": 300,
        "axes.spines.top": False,
        "axes.spines.right": False,
        "axes.grid": True,
        "grid.linestyle": "--",
        "grid.alpha": 0.3,
        "font.size": 11,
        "axes.titlesize": 13,
        "legend.frameon": False,
    })

def nice_save(fig, outdir: str, name: str):
    """Save *fig* under *outdir* (created if missing) as both PNG and PDF."""
    os.makedirs(outdir, exist_ok=True)
    for ext in ("png", "pdf"):
        fig.savefig(os.path.join(outdir, f"{name}.{ext}"), bbox_inches="tight")

# -------------------- Helpers --------------------
def _detect_date_col(df: pd.DataFrame):
    """Return the most likely date column, defaulting to the first column."""
    by_lower = {name.lower(): name for name in df.columns}
    # Case-insensitive match against common English/Chinese date labels.
    for candidate in ("date", "时间", "日期", "time"):
        if candidate in by_lower:
            return by_lower[candidate]
    return df.columns[0]

def _parse_dates(s: pd.Series) -> pd.Series:
    """Parse each value of *s* to a date, trying explicit formats first.

    Falls back to `pd.to_datetime(..., errors="coerce")` (yielding NaT on
    failure) when none of the known formats match.
    """
    known_formats = ("%Y-%m-%d", "%Y/%m/%d", "%Y.%m.%d", "%Y-%m",
                     "%Y/%m", "%Y.%m", "%Y%m%d", "%Y%m")

    def parse_one(value):
        text = str(value)
        for fmt in known_formats:
            try:
                return datetime.strptime(text, fmt)
            except Exception:
                continue
        return pd.to_datetime(value, errors="coerce")

    return s.apply(parse_one)

def _detect_y_col(df: pd.DataFrame):
    """Return the employment-rate (Y) column name.

    Tries a case-insensitive list of conventional names; otherwise falls
    back to the last numeric column (or the last column overall).
    """
    by_lower = {name.lower(): name for name in df.columns}
    for candidate in ("employment_rate", "employment", "就业率", "整体就业率", "y"):
        if candidate in by_lower:
            return by_lower[candidate]
    numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
    return numeric_cols[-1] if numeric_cols else df.columns[-1]

def ensure_date_and_numeric(df: pd.DataFrame, datecol: str | None):
    """Normalize a raw frame for merging.

    Parses the date column (auto-detected when *datecol* is None), coerces
    every other column to numeric (invalid values become NaN), drops rows
    whose date failed to parse, and renames the date column to 'date'.
    """
    out = df.copy()
    date_name = datecol if datecol is not None else _detect_date_col(out)
    out[date_name] = _parse_dates(out[date_name])
    for column in out.columns:
        if column == date_name:
            continue
        out[column] = pd.to_numeric(out[column], errors="coerce")
    out = out.dropna(subset=[date_name])
    return out.rename(columns={date_name: "date"})

def resample_df(df: pd.DataFrame, freq="M", how="mean"):
    """Resample *df* on its 'date' column to *freq* using aggregation *how*.

    *how* may be 'mean', 'last' or 'median'; anything else falls back to
    'mean'. Returns a frame with 'date' restored as a regular column.
    """
    method_name = {"mean": "mean", "last": "last", "median": "median"}.get(how, "mean")
    resampler = df.set_index("date").resample(freq)
    return getattr(resampler, method_name)().reset_index()

def merge_align(emp: pd.DataFrame, var: pd.DataFrame, align="inner", tolerance_days=40, freq="M", limit=2):
    """Join the employment frame and the factor frame on 'date'.

    Strategies:
      - 'inner':        exact date match.
      - 'nearest':      as-of merge to the nearest date within *tolerance_days*.
      - 'month'/'quarter': resample both sides first, then inner-join.
      - 'outer_ffill':  build a regular date grid at *freq*, left-join both
                        frames onto it and forward/backward fill up to *limit*.

    Rows containing any NaN are dropped; the result is sorted by date.
    Raises ValueError for an unknown *align*.
    """
    emp, var = emp.copy(), var.copy()
    if align == "inner":
        merged = pd.merge(emp, var, on="date", how="inner")
    elif align == "nearest":
        merged = pd.merge_asof(
            emp.sort_values("date"), var.sort_values("date"),
            on="date", direction="nearest",
            tolerance=pd.Timedelta(days=tolerance_days))
    elif align == "month":
        merged = pd.merge(resample_df(emp, "M", "mean"),
                          resample_df(var, "M", "last"), on="date", how="inner")
    elif align == "quarter":
        merged = pd.merge(resample_df(emp, "Q", "mean"),
                          resample_df(var, "Q", "last"), on="date", how="inner")
    elif align == "outer_ffill":
        start = min(emp["date"].min(), var["date"].min())
        stop = max(emp["date"].max(), var["date"].max())
        grid = pd.DataFrame({"date": pd.date_range(start, stop, freq=freq)})
        filled = [pd.merge(grid, frame, on="date", how="left")
                    .ffill(limit=limit).bfill(limit=limit)
                  for frame in (emp, var)]
        merged = pd.merge(filled[0], filled[1], on="date", how="inner")
    else:
        raise ValueError(f"Unknown align: {align}")
    return merged.dropna(how="all").dropna().sort_values("date").reset_index(drop=True)

def corr_with_p(a: pd.Series, b: pd.Series, method="pearson"):
    """Correlation of two series on their shared non-NaN index.

    Returns (r, p, n) where n is the number of paired observations.
    With fewer than 8 pairs the correlation is considered unreliable and
    (NaN, NaN, n) is returned instead.
    """
    common = a.dropna().index.intersection(b.dropna().index)
    aa, bb = a.loc[common], b.loc[common]
    n = len(aa)
    if n < 8:
        return np.nan, np.nan, n
    stat_fn = pearsonr if method == "pearson" else spearmanr
    r, p = stat_fn(aa, bb)
    return float(r), float(p), n

def partial_corr_resid(y: pd.Series, X: pd.DataFrame, target: str):
    """Partial correlation of X[target] with y via the residual method.

    Regresses both y and X[target] (OLS with intercept) on all remaining
    columns of X, then correlates the two residual series. Returns NaN when
    the target column is missing or the aligned sample is too small relative
    to the number of control variables.
    """
    if target is None or target not in X.columns:
        return np.nan
    controls = [c for c in X.columns if c != target]
    Z = pd.concat([y.rename("Y"), X[[target] + controls]], axis=1).dropna()
    # Guard against high-dimensional, small-sample regressions.
    if Z.shape[0] < max(8, len(controls) + 3):
        return np.nan
    if controls:
        design = sm.add_constant(Z[controls])
        y_resid = sm.OLS(Z["Y"], design).fit().resid
        x_resid = sm.OLS(Z[target], design).fit().resid
    else:
        # No controls: the partial correlation reduces to plain correlation.
        y_resid, x_resid = Z["Y"], Z[target]
    return float(np.corrcoef(y_resid, x_resid)[0, 1])

def heatmap(ax, data: pd.DataFrame, title: str = "", vmin=-1, vmax=1, cmap="coolwarm"):
    """Draw an annotated heatmap of *data* (a square correlation frame) on *ax*.

    Each cell shows its value to two decimals ("NA" for NaN); a colorbar is
    attached next to the axes.
    """
    image = ax.imshow(data.values, vmin=vmin, vmax=vmax, cmap=cmap, aspect="auto")
    n_rows, n_cols = data.shape
    ax.set_xticks(range(n_cols))
    ax.set_yticks(range(n_rows))
    ax.set_xticklabels(data.columns, rotation=45, ha="right")
    ax.set_yticklabels(data.index)
    ax.set_title(title)
    for row in range(n_rows):
        for col in range(n_cols):
            cell = data.iat[row, col]
            label = "NA" if pd.isna(cell) else f"{cell:.2f}"
            ax.text(col, row, label, ha="center", va="center", fontsize=9)
    plt.colorbar(image, ax=ax, fraction=0.046, pad=0.04)

def diverging_bars(ax, s: pd.Series, title: str):
    """Horizontal diverging bar chart of correlation values, sorted ascending.

    Infinite/NaN entries are dropped first; if nothing remains the axes are
    turned off with a placeholder title. Each bar is labelled with its value,
    nudged just past the bar end.
    """
    cleaned = s.copy().replace([np.inf, -np.inf], np.nan).dropna()
    if cleaned.empty:
        ax.set_title(title + "（无可用数据）")
        ax.axis("off")
        return

    ordered = cleaned.sort_values(ascending=True)
    ax.barh(ordered.index, ordered.values)
    ax.axvline(0, linewidth=1, color="black")
    ax.set_title(title)
    ax.set_xlabel("Correlation")
    ax.grid(True, axis="x", linestyle="--", alpha=0.3)

    # annotate() supports point offsets (text() does not), so use it to nudge
    # each label left/right of the bar end depending on sign.
    for pos, value in enumerate(ordered.values):
        ax.annotate(
            f"{value:.2f}",
            xy=(value, pos),
            xytext=(3 if value >= 0 else -3, 0),
            textcoords="offset points",
            va="center",
            ha="left" if value >= 0 else "right",
            fontsize=9,
        )


def regression_line(x, y):
    """Least-squares slope and intercept of y on x, ignoring NaN pairs.

    *x* and *y* are 1-D numeric numpy arrays. Returns (None, None) when
    fewer than 3 valid pairs remain.
    """
    valid = ~(np.isnan(x) | np.isnan(y))
    xv, yv = x[valid], y[valid]
    if len(xv) < 3:
        return None, None
    design = np.column_stack([xv, np.ones_like(xv)])
    coef, *_ = np.linalg.lstsq(design, yv, rcond=None)
    return coef[0], coef[1]

# -------------------- Main pipeline --------------------
def main():
    """Run the full correlation study end to end.

    Steps: load and normalize the two CSVs, align them on date, drop
    year-like columns, apply a year-over-year transform, compute
    Pearson/Spearman correlations (with p-values and FDR q-values) and
    partial correlations against the employment-rate column, then write
    CSV tables and publication-ready figures under --outdir.
    """
    setup_matplotlib()

    ap = argparse.ArgumentParser()
    ap.add_argument("--emp", type=str, default="/Users/linshangjin/25CCM/NKU-C/t2/code_2/employ_rate.csv")
    ap.add_argument("--vars", type=str, default="/Users/linshangjin/25CCM/NKU-C/t2/code_2/all_vars_yoy.csv")
    ap.add_argument("--outdir", type=str, default="output_vars_corr")
    ap.add_argument("--datecol_emp", type=str, default=None)
    ap.add_argument("--datecol_vars", type=str, default=None)
    ap.add_argument("--ycol", type=str, default=None)
    ap.add_argument("--align", type=str, default="nearest",
                    choices=["inner","nearest","month","quarter","outer_ffill"])
    ap.add_argument("--tolerance_days", type=int, default=40)
    ap.add_argument("--freq", type=str, default="M")  # for outer_ffill
    ap.add_argument("--limit", type=int, default=2)  # for outer_ffill
    ap.add_argument("--max_top", type=int, default=6)  # number of Top-K scatter plots
    args = ap.parse_args()

    out_root = args.outdir
    out_tabs = os.path.join(out_root, "tables")
    out_figs = os.path.join(out_root, "figs")
    os.makedirs(out_tabs, exist_ok=True)
    os.makedirs(out_figs, exist_ok=True)

    # Load the two input CSVs.
    emp = pd.read_csv(args.emp)
    var = pd.read_csv(args.vars)

    emp = ensure_date_and_numeric(emp, args.datecol_emp)
    var = ensure_date_and_numeric(var, args.datecol_vars)

    # Resolve the Y (employment-rate) column; fall back to auto-detection.
    ycol = args.ycol if (args.ycol and args.ycol in emp.columns) else _detect_y_col(emp)
    emp = emp[["date", ycol]].copy()

    # Keep only numeric factor columns alongside the date column.
    var_num = var.drop(columns=["date"]).select_dtypes(include=[np.number])
    var = pd.concat([var[["date"]], var_num], axis=1)

    # Align the two frames on date using the chosen strategy.
    df = merge_align(emp, var, align=args.align, tolerance_days=args.tolerance_days,
                     freq=args.freq, limit=args.limit)

    # Clean column labels, then drop year-like variables.
    def clean_col(c):
        # Normalize a label: strip whitespace, spaces -> underscores.
        return str(c).strip().replace(" ", "_")
    # NOTE(review): assumes 'date' is the first column after merge_align — confirm.
    df.columns = ["date"] + [clean_col(c) for c in df.columns[1:]]
    # Drop anything that looks like a year column: English/Chinese names,
    # or a 4-digit year embedded in the label.
    drop_year = [c for c in df.columns if c != "date" and (
        c.lower() == "year" or "year" in c.lower() or "年份" in c or re.search(r"\b\d{4}\b", c) is not None
    )]
    keep_cols = [c for c in df.columns if c not in drop_year]
    df = df[keep_cols]
    if len(df) < 10 or df.drop(columns=["date"]).shape[1] < 2:
        raise ValueError("有效样本或变量数量太少，请检查日期对齐与列名。")

    LAG_YOY = 4  # assumes quarterly data, so a 4-period lag is one year — TODO confirm
    def yoy_diff(s, k=LAG_YOY):
        # Year-over-year difference (used for share/percentage series).
        return s.diff(k)

    def yoy_growth(s, k=LAG_YOY):
        # Year-over-year growth rate. NOTE(review): defined but never used below.
        return (s - s.shift(k)) / s.shift(k)

    def yoy_logdiff(s, k=LAG_YOY):
        # Year-over-year log-difference (approximate growth for level series).
        return np.log(s).diff(k)

    # Apply the YoY transform to every numeric column.
    numeric_cols = df.select_dtypes(include=[np.number]).columns
    df_yoy = df.copy()
    for col in numeric_cols:
        if "占比" in col or "%" in col:  # share/percentage series: plain difference
            df_yoy[col] = yoy_diff(df[col])
        else:  # level series: log-difference
            df_yoy[col] = yoy_logdiff(df[col])

    df = df_yoy


    # Numeric matrix: everything except the date column.
    numeric = df.drop(columns=["date"]).copy()
    # Full pairwise correlation matrices.
    pearson_full = numeric.corr(method="pearson")
    spearman_full = numeric.corr(method="spearman")
    pearson_full.to_csv(os.path.join(out_tabs, "corr_pearson_full.csv"), encoding="utf-8-sig")
    spearman_full.to_csv(os.path.join(out_tabs, "corr_spearman_full.csv"), encoding="utf-8-sig")

    # Correlation of each factor with Y, plus p-values and FDR q-values.
    # NOTE(review): the `c in numeric.columns` clause is always true here.
    Xcols = [c for c in numeric.columns if c != ycol and c in numeric.columns]
    res_rows = []
    for c in Xcols:
        rP, pP, n = corr_with_p(numeric[c], numeric[ycol], method="pearson")
        rS, pS, _ = corr_with_p(numeric[c], numeric[ycol], method="spearman")
        res_rows.append({"variable": c, "pearson": rP, "pearson_p": pP,
                         "spearman": rS, "spearman_p": pS, "n_obs": n})
    corr_with_y = pd.DataFrame(res_rows).sort_values("pearson", ascending=False)
    mask = corr_with_y["pearson_p"].notna()
    if mask.any():
        # Benjamini-Hochberg FDR over the available Pearson p-values.
        _, qvals, *_ = multipletests(corr_with_y.loc[mask,"pearson_p"], method="fdr_bh")
        corr_with_y.loc[mask, "pearson_q"] = qvals
    corr_with_y.to_csv(os.path.join(out_tabs, "corr_with_y_full.csv"), index=False, encoding="utf-8-sig")

    # Partial correlations (residual method, controlling for all other factors).
    pc_rows = []
    X_all = numeric[Xcols].copy()
    y_series = numeric[ycol].copy()
    for c in Xcols:
        pc = partial_corr_resid(y_series, X_all, target=c)
        pc_rows.append({"variable": c, "partial_corr": pc})
    pc_df = pd.DataFrame(pc_rows).sort_values("partial_corr", key=lambda s: s.abs(), ascending=False)
    pc_df.to_csv(os.path.join(out_tabs, "partial_corr_with_y.csv"), index=False, encoding="utf-8-sig")

    # -------------------- Plotting --------------------
    # 1) Full-matrix heatmap (Pearson)
    fig, ax = plt.subplots(figsize=(10.5, 7.2))
    heatmap(ax, pearson_full, title="Pearson Correlation (All Variables)")
    nice_save(fig, out_figs, "heatmap_pearson_full")
    plt.close(fig)

    # 2) Full-matrix heatmap (Spearman)
    fig, ax = plt.subplots(figsize=(10.5, 7.2))
    heatmap(ax, spearman_full, title="Spearman Correlation (All Variables)")
    nice_save(fig, out_figs, "heatmap_spearman_full")
    plt.close(fig)

    # 3) Diverging bars: Pearson correlation with Y
    if not corr_with_y.empty:
        fig, ax = plt.subplots(figsize=(9, max(5.0, 0.35*len(Xcols)+2)))
        s = corr_with_y.set_index("variable")["pearson"].dropna()
        diverging_bars(ax, s, title=f"Pearson Correlation with {ycol}")
        nice_save(fig, out_figs, "bars_corr_with_y_pearson")
        plt.close(fig)

        # Spearman
        fig, ax = plt.subplots(figsize=(9, max(5.0, 0.35*len(Xcols)+2)))
        s = corr_with_y.set_index("variable")["spearman"].dropna()
        diverging_bars(ax, s, title=f"Spearman Correlation with {ycol}")
        nice_save(fig, out_figs, "bars_corr_with_y_spearman")
        plt.close(fig)

    # 4) Diverging bars: partial correlation with Y
    if not pc_df.empty:
        fig, ax = plt.subplots(figsize=(9, max(5.0, 0.35*len(pc_df)+2)))
        s = pc_df.set_index("variable")["partial_corr"].dropna()
        diverging_bars(ax, s, title=f"Partial Correlation with {ycol} (controls others)")
        nice_save(fig, out_figs, "bars_partial_corr_with_y")
        plt.close(fig)

    # 5) Top-K scatter plots with fitted line, ranked by |Pearson with Y|
    topk = min(max(1, int(args.max_top)), len(Xcols))
    order = corr_with_y.set_index("variable")["pearson"].abs().sort_values(ascending=False).head(topk).index.tolist()
    for c in order:
        x = numeric[c].values.astype(float)
        y = numeric[ycol].values.astype(float)
        fig, ax = plt.subplots(figsize=(6.4, 5.4))
        mask = np.isfinite(x) & np.isfinite(y)
        ax.scatter(x[mask], y[mask], s=32, alpha=0.85)
        k, b = regression_line(x, y)
        if k is not None:
            xs = np.linspace(np.nanmin(x[mask]), np.nanmax(x[mask]), 100)
            ax.plot(xs, k*xs + b, linewidth=2)
        ax.set_xlabel(c)
        ax.set_ylabel(ycol)
        ax.set_title(f"{c} vs {ycol}")
        nice_save(fig, out_figs, f"scatter_{c}_vs_{ycol}")
        plt.close(fig)

    print("[OK] 输出目录：", os.path.abspath(out_root))
    print(" - 表格:", os.listdir(out_tabs))
    print(" - 图片:", os.listdir(out_figs))

if __name__ == "__main__":
    main()
