# -*- coding: utf-8 -*-
"""
Correlation Analysis with Flexible Date Alignment
-------------------------------------------------
When employment_rate.csv and all_vars_yoy.csv have misaligned dates:
- Align by nearest date within tolerance (merge_asof)
- Or align by month/quarter periods (group & aggregate)
- Or do an outer union with interpolation on each side

Then compute:
- Pearson/Spearman correlations
- Partial correlations (precision matrix)
- Cross-correlation (lag) vs. employment rate
- Export publication-ready figs (PNG/PDF) + tables (CSV)

Example:
  python corr_employment_alignment.py \
    --emp /mnt/data/employ_rate.csv \
    --vars /mnt/data/all_vars_yoy.csv \
    --align nearest --tolerance_days 40 \
    --outdir output_corr_nearest

Other alignment modes:
  --align month     # align by calendar month
  --align quarter   # align by calendar quarter (Q1..Q4)
  --align outer_ffill --freq M --limit 2  # outer-join + asfreq + ffill/bfill
"""
import argparse
from pathlib import Path
from datetime import datetime, timedelta
import math
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt

# Prefer a CJK-capable font so Chinese column names render in figures.
# NOTE(review): matplotlib.font_manager is reachable only because importing
# matplotlib.pyplot above loads the submodule as a side effect — confirm, or
# import it explicitly if this block is ever moved before the pyplot import.
zh_fonts = ["SimHei", "Microsoft YaHei", "PingFang SC", "STSong", "Heiti TC", "Arial Unicode MS"]
available = set(f.name for f in matplotlib.font_manager.fontManager.ttflist)

for f in zh_fonts:
    if f in available:
        # Put the first installed CJK font at the front; keep defaults as fallback.
        plt.rcParams["font.sans-serif"] = [f] + plt.rcParams["font.sans-serif"]
        break

# Render the minus sign as '-' instead of a missing-glyph box under CJK fonts.
plt.rcParams["axes.unicode_minus"] = False

# ---------------- Style ----------------
plt.rcParams.update({
    "figure.figsize": (9, 5.2),
    "figure.dpi": 150,
    "savefig.dpi": 300,
    "axes.spines.top": False,
    "axes.spines.right": False,
    "axes.grid": True,
    "grid.linestyle": "--",
    "grid.alpha": 0.3,
    "font.size": 11,
    "axes.titlesize": 13,
})

# ---------------- Utils ----------------
def standardize_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Return a copy of *df* with whitespace-normalized column names.

    Each column name is stringified, stripped of leading/trailing
    whitespace, and has interior spaces replaced by underscores.
    The input frame is left untouched.
    """
    def _clean(name) -> str:
        return str(name).strip().replace(" ", "_")

    return df.rename(columns=_clean)

def detect_date_col(df: pd.DataFrame):
    """Best-effort detection of the date column of *df*.

    A list of common names is checked first (case-insensitively).  Failing
    that, the first column where at least half the values (and no fewer
    than 3) parse as dates wins; the final fallback is the first column.
    """
    by_lower = {col.lower(): col for col in df.columns}
    for cand in ["date", "时间", "日期", "Time", "time"]:
        hit = by_lower.get(cand.lower())
        if hit is not None:
            return hit
    for col in df.columns:
        parsed = pd.to_datetime(df[col], errors="coerce")
        if parsed.notna().sum() >= max(3, int(0.5 * len(parsed))):
            return col
    return df.columns[0]

def parse_dates(s: pd.Series) -> pd.Series:
    """Parse a series of date-like values into datetimes.

    Each value is tried against a fixed list of common yyyy-mm[-dd]
    layouts; anything that fits none of them falls back to pandas'
    general parser and becomes NaT when unparseable.
    """
    formats = ("%Y-%m-%d", "%Y/%m/%d", "%Y.%m.%d", "%Y-%m", "%Y/%m", "%Y.%m", "%Y%m%d", "%Y%m")

    def parse_one(value):
        text = str(value)
        for fmt in formats:
            try:
                return datetime.strptime(text, fmt)
            except Exception:
                pass
        return pd.to_datetime(value, errors="coerce")

    return s.apply(parse_one)

def detect_y_col(df: pd.DataFrame):
    """Best-effort detection of the target (employment-rate) column.

    Known names are matched case-insensitively first; otherwise the
    last numeric column is used, and if none exists, the last column.
    """
    lower_map = {col.lower(): col for col in df.columns}
    for cand in ["employment_rate", "employment", "就业率", "整体就业率", "y", "Y"]:
        key = cand.lower()
        if key in lower_map:
            return lower_map[key]
    numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
    if numeric_cols:
        return numeric_cols[-1]
    return df.columns[-1]

def partial_corr_matrix(df_numeric: pd.DataFrame) -> pd.DataFrame:
    """Partial correlations derived from the (pseudo-)inverse covariance.

    Rows containing any NaN are dropped first.  Returns an all-NaN matrix
    when fewer than n_vars + 2 complete rows remain or the inversion fails.
    The diagonal is fixed to 1.0.
    """
    cols = df_numeric.columns
    data = df_numeric.dropna().values
    n_rows, n_vars = data.shape
    if n_rows < n_vars + 2:
        return pd.DataFrame(np.nan, index=cols, columns=cols)
    cov = np.cov(data, rowvar=False)
    try:
        prec = np.linalg.pinv(cov)
    except Exception:
        return pd.DataFrame(np.nan, index=cols, columns=cols)
    diag = np.diag(prec)
    # epsilon keeps the division finite when a precision diagonal is ~0
    scale = np.sqrt(np.outer(diag, diag)) + 1e-12
    with np.errstate(divide='ignore', invalid='ignore'):
        pcorr = -prec / scale
    np.fill_diagonal(pcorr, 1.0)
    return pd.DataFrame(pcorr, index=cols, columns=cols)

def cross_corr_series(x: np.ndarray, y: np.ndarray, max_lag: int):
    """Normalized cross-correlation of *x* against *y* over integer lags.

    For a positive lag L, x[:T-L] is compared with y[L:] (x "leads" y by
    L steps); negative lags mirror this.  Lags with fewer than 3
    overlapping points produce NaN.  Returns (lags, correlations).
    """
    xv = np.asarray(x, dtype=float)
    yv = np.asarray(y, dtype=float)
    # z-score both series; epsilon guards against constant inputs
    xv = (xv - np.nanmean(xv)) / (np.nanstd(xv) + 1e-12)
    yv = (yv - np.nanmean(yv)) / (np.nanstd(yv) + 1e-12)
    lags = np.arange(-max_lag, max_lag + 1)
    vals = np.empty(len(lags))
    for i, lag in enumerate(lags):
        if lag > 0:
            b = yv[lag:]
            a = xv[:len(b)]
        elif lag < 0:
            a = xv[-lag:]
            b = yv[:len(a)]
        else:
            a, b = xv, yv
        vals[i] = np.corrcoef(a, b)[0, 1] if len(a) >= 3 else np.nan
    return lags, vals

def nice_save(fig, outdir: Path, name: str):
    """Save *fig* under *outdir* as both <name>.png and <name>.pdf.

    The directory is created (with parents) if it does not exist.
    """
    outdir.mkdir(parents=True, exist_ok=True)
    for ext in ("png", "pdf"):
        fig.savefig(outdir / f"{name}.{ext}", bbox_inches="tight")

def heatmap(ax, data: pd.DataFrame, title: str = "", vmin=-1, vmax=1, cmap="coolwarm"):
    """Draw an annotated heatmap of *data* on *ax* and attach a colorbar.

    Cell values are printed with two decimals; NaN cells are labeled "NA".
    """
    n_rows, n_cols = data.shape
    im = ax.imshow(data.values, vmin=vmin, vmax=vmax, cmap=cmap, aspect="auto")
    ax.set_xticks(range(n_cols))
    ax.set_xticklabels(data.columns, rotation=45, ha="right")
    ax.set_yticks(range(n_rows))
    ax.set_yticklabels(data.index)
    ax.set_title(title)
    for r in range(n_rows):
        for c in range(n_cols):
            cell = data.iat[r, c]
            label = "NA" if pd.isna(cell) else f"{cell:.2f}"
            ax.text(c, r, label, ha="center", va="center", fontsize=9)
    plt.colorbar(im, ax=ax, fraction=0.046, pad=0.04)

def diverging_bars(ax, s: pd.Series, title: str):
    """Horizontal bar chart of *s*, sorted ascending, with a zero line."""
    ordered = s.sort_values(ascending=True)
    ax.barh(ordered.index, ordered.values)
    ax.axvline(0, linewidth=1)
    ax.set_xlabel("Correlation")
    ax.set_title(title)
    ax.grid(True, axis="x")

def regression_line(x, y):
    """Least-squares slope and intercept of y ~ x, ignoring NaN pairs.

    Returns (None, None) when fewer than 3 valid pairs remain.
    """
    valid = ~(np.isnan(x) | np.isnan(y))
    xv, yv = x[valid], y[valid]
    if len(xv) < 3:
        return None, None
    design = np.column_stack([xv, np.ones_like(xv)])
    coef, *_ = np.linalg.lstsq(design, yv, rcond=None)
    return coef[0], coef[1]

# ---------------- Alignment ----------------
def align_data(emp: pd.DataFrame, var: pd.DataFrame, mode: str, ycol: str,
               tolerance_days: int = 30, month_agg: str = "mean", quarter_agg: str = "mean",
               outer_freq: str = "M", outer_limit: int = 1):
    """Align the employment frame with the variables frame in time.

    Parameters
    ----------
    emp, var : DataFrames, each with a 'date' column (values are parsed).
    mode : one of 'nearest', 'month', 'quarter', 'outer_ffill'.
    ycol : name of the target column inside *emp*.
    tolerance_days : max date distance for 'nearest' matching.
    month_agg, quarter_agg : aggregation name used when collapsing to periods.
    outer_freq, outer_limit : grid frequency and ffill/bfill limit for
        'outer_ffill'.

    Returns a DataFrame with a 'date' column, *ycol*, and the numeric
    variable columns; rows containing NaN are dropped.  Raises ValueError
    for an unknown *mode*.
    """
    emp = emp.copy(); var = var.copy()
    # parse & sort by time so merge_asof / groupby behave deterministically
    emp["date"] = parse_dates(emp["date"]); var["date"] = parse_dates(var["date"])
    emp = emp.dropna(subset=["date"]).sort_values("date")
    var = var.dropna(subset=["date"]).sort_values("date")
    # ensure y is numeric
    emp[ycol] = pd.to_numeric(emp[ycol], errors="coerce")
    emp = emp.dropna(subset=[ycol])

    # keep only the numeric explanatory columns (plus the date)
    var_num = var.drop(columns=["date"]).select_dtypes(include=[np.number])
    var = pd.concat([var[["date"]], var_num], axis=1)

    if mode == "nearest":
        # merge_asof: left=emp, right=var; nearest within tolerance
        tol = pd.Timedelta(days=int(tolerance_days))
        merged = pd.merge_asof(emp.sort_values("date"),
                               var.sort_values("date"),
                               on="date", direction="nearest", tolerance=tol)
        return merged.dropna()

    if mode == "month":
        # collapse both sides to calendar months, then inner-join on month
        emp["month"] = emp["date"].dt.to_period("M")
        var["month"] = var["date"].dt.to_period("M")
        gemp = emp.groupby("month", as_index=False)[ycol].agg(month_agg)
        # BUGFIX: drop the raw 'date' column before aggregating.  On pandas
        # versions where datetime aggregation succeeds it survived the
        # groupby, and the rename below then produced a duplicate 'date'
        # column, breaking df["date"].dt.to_timestamp().
        gvar = var.drop(columns=["date"]).groupby("month", as_index=False).agg(month_agg)
        df = pd.merge(gemp, gvar, on="month", how="inner").rename(columns={"month":"date"})
        df["date"] = df["date"].dt.to_timestamp()
        return df.dropna()

    if mode == "quarter":
        # same scheme as 'month', with calendar quarters
        emp["quarter"] = emp["date"].dt.to_period("Q")
        var["quarter"] = var["date"].dt.to_period("Q")
        gemp = emp.groupby("quarter", as_index=False)[ycol].agg(quarter_agg)
        # BUGFIX: same duplicate-'date' hazard as in the month branch
        gvar = var.drop(columns=["date"]).groupby("quarter", as_index=False).agg(quarter_agg)
        df = pd.merge(gemp, gvar, on="quarter", how="inner").rename(columns={"quarter":"date"})
        df["date"] = df["date"].dt.to_timestamp()
        return df.dropna()

    if mode == "outer_ffill":
        # outer union of dates, snap onto a regular grid, ffill/bfill within limit
        idx = pd.Index(sorted(set(emp["date"]) | set(var["date"])), name="date")
        emp2 = emp.set_index("date").reindex(idx)
        var2 = var.set_index("date").reindex(idx)

        # resample both sides onto the requested frequency grid
        emp2 = emp2.asfreq(outer_freq)
        var2 = var2.asfreq(outer_freq)

        # fill only small gaps; the limit guards against inventing long runs
        emp2[ycol] = emp2[ycol].ffill(limit=outer_limit).bfill(limit=outer_limit)
        for c in var2.columns:
            if c == ycol:
                continue
            var2[c] = var2[c].ffill(limit=outer_limit).bfill(limit=outer_limit)

        df = pd.concat([emp2[[ycol]],
                        var2.drop(columns=[col for col in var2.columns if col == ycol], errors='ignore')],
                       axis=1)
        return df.dropna().reset_index()  # bring 'date' back as a column

    raise ValueError(f"Unknown align mode: {mode}")

# ---------------- Main ----------------
def main():
    """CLI entry point: load both CSVs, align them, correlate, and export
    tables (CSV) and figures (PNG/PDF) under --outdir."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--emp", type=str, default="/Users/linshangjin/25CCM/NKU-C/t2/code_2/employ_rate.csv")
    ap.add_argument("--vars", type=str, default="/Users/linshangjin/25CCM/NKU-C/t2/code_2/all_vars_yoy.csv")
    ap.add_argument("--datecol_emp", type=str, default=None)
    ap.add_argument("--datecol_vars", type=str, default=None)
    ap.add_argument("--ycol", type=str, default=None)
    ap.add_argument("--align", type=str, default="nearest",
                    choices=["nearest", "month", "quarter", "outer_ffill"])
    ap.add_argument("--tolerance_days", type=int, default=30)
    ap.add_argument("--freq", type=str, default="M", help="for outer_ffill: resample freq, e.g., M, W, Q")
    ap.add_argument("--limit", type=int, default=1, help="for outer_ffill: ffill/bfill limit")
    ap.add_argument("--outdir", type=str, default="output_corr_aligned")
    ap.add_argument("--max_lag", type=int, default=6)
    args = ap.parse_args()

    # output layout: <outdir>/tables/*.csv and <outdir>/figs/*.{png,pdf}
    out_root = Path(args.outdir); tabs = out_root/"tables"; figs = out_root/"figs"
    tabs.mkdir(parents=True, exist_ok=True); figs.mkdir(parents=True, exist_ok=True)

    emp = standardize_columns(pd.read_csv(args.emp))
    var = standardize_columns(pd.read_csv(args.vars))

    # unify date col names to 'date'
    d_emp = args.datecol_emp or detect_date_col(emp)
    d_var = args.datecol_vars or detect_date_col(var)
    if d_emp != "date": emp = emp.rename(columns={d_emp: "date"})
    if d_var != "date": var = var.rename(columns={d_var: "date"})

    ycol = args.ycol or detect_y_col(emp)

    df = align_data(emp, var, mode=args.align, ycol=ycol,
                    tolerance_days=args.tolerance_days,
                    outer_freq=args.freq, outer_limit=args.limit)

    if len(df) < 5:
        raise ValueError(f"Aligned rows too few: {len(df)}. Try a different --align mode or loosen tolerance.")

    # numeric matrix: complete cases only, kept in aligned time order
    num_df = df.drop(columns=["date"]).apply(pd.to_numeric, errors="coerce")
    num_df = num_df.dropna().reset_index(drop=True)
    if ycol not in num_df.columns:
        raise ValueError(f"ycol '{ycol}' is not numeric or missing after alignment.")

    # full pairwise correlations
    pearson = num_df.corr(method="pearson")
    spearman = num_df.corr(method="spearman")
    pearson.to_csv(tabs/"corr_pearson_full.csv", encoding="utf-8-sig")
    spearman.to_csv(tabs/"corr_spearman_full.csv", encoding="utf-8-sig")

    # correlations of every variable with y
    others = [c for c in num_df.columns if c != ycol]
    pearson_y = pearson[ycol].drop(index=ycol, errors="ignore") if ycol in pearson.columns else pd.Series(dtype=float)
    spearman_y = spearman[ycol].drop(index=ycol, errors="ignore") if ycol in spearman.columns else pd.Series(dtype=float)
    pd.DataFrame({"pearson": pearson_y, "spearman": spearman_y}).to_csv(tabs/"corr_with_y.csv", encoding="utf-8-sig")

    # partial correlations (each pair controlling for all other variables)
    pc = partial_corr_matrix(num_df)
    pc.to_csv(tabs/"partial_corr_full.csv", encoding="utf-8-sig")
    pc_y = pc[ycol].drop(index=ycol, errors="ignore") if ycol in pc.columns else pd.Series(dtype=float)
    pc_y.to_csv(tabs/"partial_corr_with_y.csv", encoding="utf-8-sig")

    # cross-correlation (use aligned row order as time)
    max_lag = int(args.max_lag)
    lags = np.arange(-max_lag, max_lag + 1)
    xcorr = []
    for c in others:
        _, cc = cross_corr_series(num_df[c].values, num_df[ycol].values, max_lag=max_lag)
        xcorr.append(cc)
    xcorr_df = pd.DataFrame(xcorr, index=others, columns=lags)
    xcorr_df.to_csv(tabs/"cross_corr_y_vs_vars.csv", encoding="utf-8-sig")

    # figures: correlation heatmaps, diverging bars, and the lag heatmap
    if pearson.shape[0] > 0:
        fig, ax = plt.subplots(figsize=(10.5, 7))
        heatmap(ax, pearson, title="Pearson Correlation (Aligned)")
        nice_save(fig, figs, "heatmap_pearson_full")
        plt.close(fig)
    if spearman.shape[0] > 0:
        fig, ax = plt.subplots(figsize=(10.5, 7))
        heatmap(ax, spearman, title="Spearman Correlation (Aligned)")
        nice_save(fig, figs, "heatmap_spearman_full")
        plt.close(fig)
    if not pearson_y.empty:
        fig, ax = plt.subplots(figsize=(9, 6.5))
        diverging_bars(ax, pearson_y, title=f"Pearson Corr with {ycol}")
        nice_save(fig, figs, "bars_corr_with_y_pearson")
        plt.close(fig)
    if not spearman_y.empty:
        fig, ax = plt.subplots(figsize=(9, 6.5))
        diverging_bars(ax, spearman_y, title=f"Spearman Corr with {ycol}")
        nice_save(fig, figs, "bars_corr_with_y_spearman")
        plt.close(fig)
    if len(others) > 0:
        fig, ax = plt.subplots(figsize=(10.5, max(4, 0.4*len(others)+2)))
        hm = ax.imshow(xcorr_df.values, aspect="auto", vmin=-1, vmax=1, cmap="coolwarm")
        ax.set_xticks(range(len(lags))); ax.set_xticklabels(lags)
        ax.set_yticks(range(len(others))); ax.set_yticklabels(others)
        ax.set_xlabel("Lag (rows): +L means factor leads Y by L")
        ax.set_title(f"Cross-correlation vs {ycol}")
        plt.colorbar(hm, ax=ax, fraction=0.046, pad=0.04)
        # annotate each row at its strongest absolute lag
        for i in range(len(others)):
            row = xcorr_df.iloc[i].values
            if np.all(np.isnan(row)): continue
            j = int(np.nanargmax(np.abs(row)))
            ax.text(j, i, f"{row[j]:.2f}", ha="center", va="center", fontsize=9,
                    bbox=dict(boxstyle="round,pad=0.2", fc="white", ec="none", alpha=0.7))
        nice_save(fig, figs, "heatmap_cross_corr_lags")
        plt.close(fig)

    # summary tables
    summary = pd.DataFrame({"pearson": pearson_y, "spearman": spearman_y, "partial": pc_y})
    if not summary.empty:
        (summary.assign(abs_pearson=lambda d: d["pearson"].abs())
                .sort_values("abs_pearson", ascending=False)
                .drop(columns=["abs_pearson"])
        ).to_csv(tabs/"summary_top_by_abs_pearson.csv", encoding="utf-8-sig")

    best_lags = []
    for c in others:
        row = xcorr_df.loc[c]
        # BUGFIX: np.nanargmax raises ValueError on an all-NaN row; skip
        # such rows (same guard as the heatmap-annotation loop above)
        if np.all(np.isnan(row.values)):
            continue
        j = int(np.nanargmax(np.abs(row.values)))
        best_lags.append({"variable": c, "best_lag": int(row.index[j]), "xcorr": float(row.values[j])})
    if best_lags:
        pd.DataFrame(best_lags).sort_values("xcorr", key=lambda s: s.abs(), ascending=False)\
            .to_csv(tabs/"summary_best_lags.csv", index=False, encoding="utf-8-sig")

    print(f"[Done] Outputs -> {out_root.resolve()}")
    print(" - tables:", [p.name for p in tabs.glob('*.csv')])
    print(" - figs  :", [p.name for p in figs.glob('*.png')])

# Run the CLI when executed as a script (not when imported).
if __name__ == "__main__":
    main()
