# -*- coding: utf-8 -*-
"""
多方法多相关性分析（含同比差分）
- 自动统一日期列
- 自动去掉包含 year/年份 的列
- 对占比类变量做同比差分，对量值类变量做对数同比增长
- 输出 Pearson, Spearman, Kendall, 偏相关, 距离相关, 互信息, CCF, Granger
"""

import os
import re
import argparse
import numpy as np
import pandas as pd
from pathlib import Path
from scipy.stats import pearsonr, spearmanr, kendalltau
from sklearn.feature_selection import mutual_info_regression
import statsmodels.api as sm
from statsmodels.tsa.stattools import grangercausalitytests
import matplotlib.pyplot as plt
import seaborn as sns

# ======= 工具函数 =======
def ensure_date_and_numeric(df, datecol):
    """Normalize a raw frame: parse the date column, coerce everything else to numeric.

    Parameters
    ----------
    df : pd.DataFrame
        Raw frame as read from CSV.
    datecol : str
        Name of the date column in *df*.

    Returns a NEW DataFrame whose date column is renamed to "date" and
    parsed via ``pd.to_datetime`` (unparseable -> NaT); every other column
    is coerced with ``pd.to_numeric`` (unparseable -> NaN).

    Raises ValueError if *datecol* is missing.
    """
    if datecol not in df.columns:
        raise ValueError(f"缺少日期列 {datecol}")
    # BUGFIX: work on a copy — the original coerced columns on the caller's
    # frame in place, silently mutating the argument.
    df = df.copy()
    df[datecol] = pd.to_datetime(df[datecol], errors="coerce")
    for c in df.columns:
        if c != datecol:
            df[c] = pd.to_numeric(df[c], errors="coerce")
    return df.rename(columns={datecol: "date"})

def _detect_y_col(df):
    """自动检测就业率列"""
    for c in df.columns:
        if "就业率" in c or "y" == c.lower():
            return c
    raise ValueError("未找到就业率列，请手动指定 --ycol")

def merge_align(df1, df2, align="inner", tolerance_days=None, freq=None, limit=None):
    """Merge two frames on their "date" column.

    When *freq* is given, both frames are first resampled to that frequency
    with mean aggregation. *align* is forwarded to the merge as ``how``.
    NOTE(review): ``tolerance_days`` and ``limit`` are accepted for API
    compatibility but are currently unused.
    """
    if freq:
        df1 = df1.set_index("date").resample(freq).mean().reset_index()
        df2 = df2.set_index("date").resample(freq).mean().reset_index()
    return df1.merge(df2, on="date", how=align)

def corr_with_p(a, b, method="pearson"):
    """Correlation between two Series on their jointly non-NaN index.

    Parameters
    ----------
    a, b : pd.Series
        Aligned by index; rows where either is NaN are dropped.
    method : {"pearson", "spearman", "kendall"}

    Returns (r, p, n). r and p are NaN when fewer than 6 joint
    observations remain. Raises ValueError for an unknown *method*
    (the original fell through to a NameError on undefined r, p).
    """
    idx = a.dropna().index.intersection(b.dropna().index)
    a1, b1 = a.loc[idx], b.loc[idx]
    if len(a1) < 6:  # too few points for a meaningful p-value
        return np.nan, np.nan, len(a1)
    if method == "pearson":
        r, p = pearsonr(a1, b1)
    elif method == "spearman":
        r, p = spearmanr(a1, b1)
    elif method == "kendall":
        r, p = kendalltau(a1, b1)
    else:
        raise ValueError(f"unknown correlation method: {method!r}")
    return r, p, len(a1)

def partial_corr(y, X, target):
    """Partial correlation of *y* with ``X[target]``, controlling for the rest of X.

    Both series are residualized on the remaining columns of X via OLS
    (with an intercept), and the plain Pearson correlation of the two
    residual vectors is returned. NaN when fewer than 6 complete rows.
    """
    controls = [c for c in X.columns if c != target]
    data = pd.concat([y.rename("Y"), X[[target] + controls]], axis=1).dropna()
    if data.shape[0] < 6:
        return np.nan
    if controls:
        design = sm.add_constant(data[controls])
        resid_y = sm.OLS(data["Y"], design).fit().resid
        resid_x = sm.OLS(data[target], design).fit().resid
    else:
        # nothing to control for: fall back to the raw series
        resid_y, resid_x = data["Y"], data[target]
    return float(np.corrcoef(resid_y, resid_x)[0, 1])

def distance_corr(x, y):
    """Sample distance correlation between two 1-D samples (biased estimator).

    Parameters
    ----------
    x, y : array-like of the same length.

    Returns a float in [0, 1]; NaN for fewer than 2 points; 0.0 when either
    marginal distance variance is zero (e.g. a constant series).
    Raises ValueError when the inputs have different lengths — the original
    only failed later with an opaque broadcasting error.
    """
    x = np.asarray(x, dtype=float).ravel()
    y = np.asarray(y, dtype=float).ravel()
    if x.shape[0] != y.shape[0]:
        raise ValueError("distance_corr: x and y must have the same length")
    n = x.shape[0]
    if n < 2:
        return np.nan
    # pairwise absolute-distance matrices
    a = np.abs(x[:, None] - x[None, :])
    b = np.abs(y[:, None] - y[None, :])
    # double centering: A_ij = a_ij - rowmean_i - colmean_j + grandmean
    A = a - a.mean(axis=0) - a.mean(axis=1)[:, None] + a.mean()
    B = b - b.mean(axis=0) - b.mean(axis=1)[:, None] + b.mean()
    dcov2_xy = (A * B).sum() / (n * n)
    dcov2_xx = (A * A).sum() / (n * n)
    dcov2_yy = (B * B).sum() / (n * n)
    denom = np.sqrt(dcov2_xx * dcov2_yy)
    if denom == 0:
        return 0.0
    # dcov² is non-negative in theory but can dip below zero from float
    # noise; clamp so sqrt never produces NaN.
    return float(np.sqrt(max(dcov2_xy, 0.0) / denom))

def ccf_best_lag(x, y, maxlag=8):
    """Cross-correlation scan: the lag in [-maxlag, maxlag] maximizing |corr|.

    At lag k the statistic is corr(x_t, y_{t-k}) on the jointly non-NaN
    index. Returns (best_lag, corr_at_best_lag), or (nan, nan) when there
    are too few observations or when every lagged correlation is undefined
    (e.g. a constant series) — the original crashed on np.nanargmax with an
    "All-NaN slice" error in that case.
    """
    idx = x.dropna().index.intersection(y.dropna().index)
    xs, ys = x.loc[idx], y.loc[idx]
    if len(xs) < maxlag + 2:
        return np.nan, np.nan
    lags = list(range(-maxlag, maxlag + 1))
    corr = [xs.corr(ys.shift(k)) for k in lags]
    if np.all(np.isnan(corr)):  # BUGFIX: guard the all-NaN case
        return np.nan, np.nan
    best = int(np.nanargmax(np.abs(corr)))
    return lags[best], corr[best]

def granger_min_p(x, y, maxlag=6):
    """Granger causality x -> y: (best_lag, min_p) over lags 1..maxlag.

    Runs statsmodels' Granger test on the jointly non-NaN rows and returns
    the lag with the smallest ssr F-test p-value. Returns (nan, nan) when
    there are too few observations, or when the test raises (degenerate /
    collinear input) — treated as best-effort.
    """
    pair = pd.concat([y.rename("Y"), x.rename("X")], axis=1).dropna()
    if len(pair) <= maxlag + 2:
        return np.nan, np.nan
    try:
        res = grangercausalitytests(pair[["Y", "X"]], maxlag=maxlag, verbose=False)
        candidates = [(lag, res[lag][0]["ssr_ftest"][1]) for lag in res]
        return min(candidates, key=lambda t: t[1])
    # BUGFIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # catch only ordinary exceptions.
    except Exception:
        return np.nan, np.nan

def diverging_bars(ax, s: pd.Series, title: str):
    """Horizontal diverging bar chart: green for non-negative, red for negative.

    Inf values are dropped together with NaNs; when nothing remains the
    axes are turned off and the title is annotated instead. Each bar is
    labeled with its value to two decimal places.
    """
    clean = s.replace([np.inf, -np.inf], np.nan).dropna()
    if clean.empty:
        ax.set_title(title + "（无数据）")
        ax.axis("off")
        return
    ordered = clean.sort_values()
    colors = ["#2ca02c" if val >= 0 else "#d62728" for val in ordered.values]
    ax.barh(ordered.index, ordered.values, color=colors)
    ax.axvline(0, color="black", lw=1)
    ax.set_title(title)
    for pos, val in enumerate(ordered.values):
        ax.annotate(f"{val:.2f}", xy=(val, pos),
                    xytext=(3, 0) if val >= 0 else (-3, 0),
                    textcoords="offset points", va="center",
                    ha="left" if val >= 0 else "right", fontsize=8)

# ======= 主函数 =======
def main():
    """End-to-end pipeline: load, align, YoY-transform, run every method, plot.

    Reads the employment-rate CSV and the explanatory-variable CSV, aligns
    them on date (optionally resampled), applies a year-over-year transform,
    then writes one CSV per method (Pearson/Spearman/Kendall, partial,
    distance, mutual information, CCF, Granger) plus diverging-bar figures.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--emp", type=str, default="/Users/linshangjin/25CCM/NKU-C/t2/code_2/employ_rate.csv")
    parser.add_argument("--vars", type=str, default="/Users/linshangjin/25CCM/NKU-C/t2/code_2/all_vars_yoy.csv")
    parser.add_argument("--outdir", default="output_corr")
    parser.add_argument("--datecol_emp", default="时间")
    parser.add_argument("--datecol_vars", default="日期")
    parser.add_argument("--ycol", default="整体就业率")
    parser.add_argument("--align", default="inner")
    parser.add_argument("--freq", default="Q")
    parser.add_argument("--yoy_lag", type=int, default=4)
    parser.add_argument("--maxlag_ccf", type=int, default=8)
    parser.add_argument("--maxlag_granger", type=int, default=6)
    args = parser.parse_args()

    out_tabs = Path(args.outdir) / "tables"
    out_figs = Path(args.outdir) / "figs"
    out_tabs.mkdir(parents=True, exist_ok=True)
    out_figs.mkdir(parents=True, exist_ok=True)

    # === Load and align ===
    emp = ensure_date_and_numeric(pd.read_csv(args.emp), args.datecol_emp)
    var = ensure_date_and_numeric(pd.read_csv(args.vars), args.datecol_vars)

    ycol = args.ycol if (args.ycol and args.ycol in emp.columns) else _detect_y_col(emp)
    emp = emp[["date", ycol]].copy()

    var_num = var.drop(columns=["date"]).select_dtypes(include=[np.number])
    var = pd.concat([var[["date"]], var_num], axis=1)

    df = merge_align(emp, var, align=args.align, freq=args.freq)

    # Normalize column names and drop year-like columns.
    df.columns = ["date"] + [str(c).strip().replace(" ", "_") for c in df.columns[1:]]
    # BUGFIX: keep ycol in sync with the normalization above, otherwise a
    # y column containing spaces would raise KeyError further down.
    ycol = str(ycol).strip().replace(" ", "_")
    drop_year = [c for c in df.columns if c != "date" and (
        "year" in c.lower() or "年份" in c or re.search(r"\b\d{4}\b", c)
    )]
    df = df[[c for c in df.columns if c not in drop_year]]

    if len(df) < 10 or df.drop(columns=["date"]).shape[1] < 2:
        raise ValueError("有效样本或变量数量太少，请检查日期对齐与列名。")

    # === Year-over-year transform ===
    # Share-like columns (占比 / %): plain YoY difference.
    # Level columns: log YoY difference (approximate growth rate);
    # np.log yields NaN/-inf for non-positive values, which downstream
    # steps drop row-wise.
    numeric_cols = df.select_dtypes(include=[np.number]).columns
    df_yoy = df.copy()
    for col in numeric_cols:
        if "占比" in col or "%" in col:
            df_yoy[col] = df[col].diff(args.yoy_lag)
        else:
            df_yoy[col] = np.log(df[col]).diff(args.yoy_lag)
    df = df_yoy.set_index("date")

    # === Correlation analyses ===
    y = df[ycol]
    others = [c for c in df.columns if c != ycol]

    # Pearson / Spearman / Kendall
    rows = []
    for m in ["pearson", "spearman", "kendall"]:
        for c in others:
            r, p, n = corr_with_p(df[c], y, method=m)
            rows.append({"method": m, "variable": c, "r": r, "p": p, "n": n})
    corr_tab = pd.DataFrame(rows)
    corr_tab.to_csv(out_tabs / "corr_with_y.csv", index=False, encoding="utf-8-sig")

    # Partial correlation (each variable controlling for all others)
    pc_rows = []
    X_pc = df[others]
    for c in others:
        pc = partial_corr(y, X_pc, c)
        pc_rows.append({"variable": c, "partial_corr": pc})
    pd.DataFrame(pc_rows).to_csv(out_tabs / "partial_corr_with_y.csv", index=False, encoding="utf-8-sig")

    # Distance correlation
    # BUGFIX: the original dropped NaNs on each series separately
    # (df[c].dropna(), y.dropna()), which misaligns the two samples and can
    # even hand distance_corr arrays of different lengths. Drop jointly.
    dc_rows = []
    for c in others:
        valid = df[c].notna() & y.notna()
        dc = distance_corr(df.loc[valid, c], y[valid])
        dc_rows.append({"variable": c, "distance_corr": dc})
    pd.DataFrame(dc_rows).to_csv(out_tabs / "distance_corr_with_y.csv", index=False, encoding="utf-8-sig")

    # Mutual information
    # BUGFIX: the original accumulated the mask across iterations
    # (mask &= df[c].notna()), so every variable inherited the missingness
    # of all previously processed ones; rebuild the mask per variable.
    mi_rows = []
    for c in others:
        m = y.notna() & df[c].notna()
        if m.sum() < 6:
            mi_rows.append({"variable": c, "mutual_info": np.nan})
        else:
            mi = mutual_info_regression(df.loc[m, [c]], y.loc[m], random_state=0)
            mi_rows.append({"variable": c, "mutual_info": mi[0]})
    pd.DataFrame(mi_rows).to_csv(out_tabs / "mutual_info_with_y.csv", index=False, encoding="utf-8-sig")

    # Cross-correlation (best lag)
    ccf_rows = []
    for c in others:
        lag, val = ccf_best_lag(df[c], y, maxlag=args.maxlag_ccf)
        ccf_rows.append({"variable": c, "best_lag": lag, "ccf": val})
    pd.DataFrame(ccf_rows).to_csv(out_tabs / "ccf_with_y.csv", index=False, encoding="utf-8-sig")

    # Granger causality (variable -> y)
    gr_rows = []
    for c in others:
        bestlag, pmin = granger_min_p(df[c], y, maxlag=args.maxlag_granger)
        gr_rows.append({"variable": c, "best_lag": bestlag, "pmin": pmin})
    pd.DataFrame(gr_rows).to_csv(out_tabs / "granger_with_y.csv", index=False, encoding="utf-8-sig")

    # === Figures ===
    sns.set_style("whitegrid")
    for m in ["pearson", "spearman", "kendall"]:
        fig, ax = plt.subplots(figsize=(6, max(4, len(others)*0.3)))
        data = corr_tab.query("method == @m").set_index("variable")["r"]
        diverging_bars(ax, data, f"{m.title()} Correlation with {ycol}")
        plt.tight_layout()
        fig.savefig(out_figs / f"bars_corr_{m}.png", dpi=300)
        plt.close(fig)

    print(f"✅ 分析完成，结果保存在: {args.outdir}")

# Script entry point: run the full analysis when executed directly.
if __name__ == "__main__":
    main()
