"""APC 复健流水线脚本：
- 重新拟合 APC / full / full_inter，确保 posterior_predictive 已并入 InferenceData
- 对 income 做 log1p 后重复 APC 拟合
- 用 Student-t 误差重跑 APC
- 对所有模型运行 model_diagnostics（保存 PPC 与残差图）
- 用 PSIS-LOO 比较模型并保存 Pareto-k 与比较表

用法: 在项目根目录激活 venv 后运行：
python scripts/apc_robust_pipeline.py
"""
import os
import sys
import yaml
import argparse

# Ensure project root is on sys.path so `import src` works when running from scripts/
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if ROOT not in sys.path:
    sys.path.insert(0, ROOT)
import numpy as np
import arviz as az
import pymc as pm
from src.core_utils import (
    generate_apc_data,
    preprocess_data,
    build_and_sample_apc_model,
    build_and_sample_full_model,
    build_and_sample_full_model_with_interaction,
    _attach_posterior_predictive,
    save_inferencedata,
    model_diagnostics,
    print_full_model_diagnosis,
)
from matplotlib.font_manager import FontProperties
from utils.plot_style import set_plot_style, set_chinese_font
import xarray as xr
from scipy import stats

OUT_DIR = "outputs"
FIG_DIR = "figures"
os.makedirs(OUT_DIR, exist_ok=True)
os.makedirs(FIG_DIR, exist_ok=True)

# parse CLI args for a fast smoke-test mode
parser = argparse.ArgumentParser(description='APC robust pipeline (supports --fast smoke-test)')
parser.add_argument('--fast', action='store_true', help='Run a fast smoke-test (smaller draws, skip LOO)')
args = parser.parse_args()

# load plot configuration from config.yaml if present
try:
    cfg = yaml.safe_load(open('config.yaml')) or {}
except Exception:
    cfg = {}

# If fast mode requested, override sampling-heavy settings for a quick run
if args.fast:
    print('FAST MODE: applying quick-sample overrides (draws/tune/chains reduced, skip heavy checks)')
    # apply sensible fast defaults but allow explicit cfg keys to override
    cfg.setdefault('draws', 100)
    cfg.setdefault('tune', 100)
    cfg.setdefault('chains', 1)
    cfg.setdefault('parallel', False)
    cfg.setdefault('run_advanced_checks', False)
plot_cfg = cfg.get('plot', None)
set_plot_style(plot_cfg)
if plot_cfg and plot_cfg.get('font_family'):
    set_chinese_font(plot_cfg.get('font_family'))

# font prop for functions that accept it
font_family = (plot_cfg.get('font_family') if plot_cfg else None) or 'Heiti TC'
try:
    font_prop = FontProperties(family=font_family)
except Exception:
    font_prop = None


def run_and_save(name, model_runner, *runner_args, **runner_kwargs):
    """Run a model fit, persist its InferenceData, and emit diagnostics.

    Parameters
    ----------
    name : str
        Basename used when saving the InferenceData and figures.
    model_runner : callable
        Function that fits a model; may return an InferenceData directly or a
        tuple/list whose first element is the InferenceData.
    *runner_args, **runner_kwargs
        Forwarded to ``model_runner``. The first positional argument is
        assumed to be the data frame (it is reused for the pointwise
        log-likelihood computation).

    Returns
    -------
    The InferenceData, possibly augmented with a ``log_likelihood`` group.
    """
    # NOTE: *runner_args deliberately avoids shadowing the module-level
    # argparse `args`.
    print(f"\n--- Running {name} ---")
    idata = model_runner(*runner_args, **runner_kwargs)
    # some runners return trace directly, others return (trace, ...)
    if isinstance(idata, (tuple, list)):
        idata = idata[0]
    # Ensure posterior_predictive exists. FIX: the previous fallback sampled
    # inside a fresh, empty `pm.Model()`, which can never work because
    # sample_posterior_predictive needs the *original* model's observed
    # variables — it always failed. Report the problem instead.
    try:
        if not (hasattr(idata, "posterior_predictive") and idata.posterior_predictive is not None):
            print(
                f"{name}: posterior_predictive missing and the original model "
                "object is not available here; it cannot be regenerated from "
                "the trace alone. Re-run the model runner with posterior "
                "predictive sampling enabled."
            )
    except Exception:
        pass
    save_inferencedata(idata, name, out_dir=OUT_DIR)
    # ensure log_likelihood present for LOO by adding pointwise loglik if missing
    try:
        # FIX: tolerate data passed by keyword instead of indexing blindly
        data = runner_args[0] if runner_args else runner_kwargs.get('data')
        idata = add_pointwise_log_likelihood(idata, data)
        save_inferencedata(idata, name, out_dir=OUT_DIR)
    except Exception as e:
        print(f"Could not add pointwise log_likelihood for {name}: {e}")
    model_diagnostics(idata, name, fig_save_path=FIG_DIR)
    print_full_model_diagnosis(idata)
    return idata


def build_apc_studentt(data, config):
    """Fit an Age-Period-Cohort model with a Student-t observation likelihood.

    Parameters
    ----------
    data : pandas.DataFrame
        Preprocessed data. Must provide ``age_group`` (pandas categorical),
        integer ``period_code`` and ``cohort_code`` columns, and ``income``
        as the response.
    config : dict
        Sampler settings read via ``.get``: ``draws``, ``tune``, ``chains``,
        ``cores``/``n_threads``/``parallel``, ``random_seed``; plus the
        optional ``sum_to_zero`` flag controlling the identifiability
        constraint on the effect vectors.

    Returns
    -------
    InferenceData containing the posterior with posterior_predictive attached.
    """
    # build APC with Student-t likelihood
    n_ages = len(data["age_group"].cat.categories)
    n_periods = len(data["period_code"].unique())
    n_cohorts = len(data["cohort_code"].unique())
    # Per-observation index arrays mapping each row to its effect level.
    age_idx = data["age_group"].cat.codes.values
    period_idx = data["period_code"].values
    cohort_idx = data["cohort_code"].values
    y = data["income"].values
    with pm.Model() as apc_model:
        # Grand mean with a wide (weakly informative) prior.
        mu = pm.Normal("mu", mu=0, sigma=100)
        if config.get("sum_to_zero", False):
            # Sum-to-zero parameterisation: centre each raw effect vector so
            # the levels are identified relative to the grand mean `mu`.
            age_raw = pm.Normal("age_raw", mu=0, sigma=50, shape=n_ages)
            age_effect = pm.Deterministic("age_effect", age_raw - pm.math.mean(age_raw))
            period_raw = pm.Normal("period_raw", mu=0, sigma=50, shape=n_periods)
            period_effect = pm.Deterministic("period_effect", period_raw - pm.math.mean(period_raw))
            cohort_raw = pm.Normal("cohort_raw", mu=0, sigma=50, shape=n_cohorts)
            cohort_effect = pm.Deterministic("cohort_effect", cohort_raw - pm.math.mean(cohort_raw))
        else:
            # Unconstrained effects (relies on priors for soft identification).
            age_effect = pm.Normal("age_effect", mu=0, sigma=50, shape=n_ages)
            period_effect = pm.Normal("period_effect", mu=0, sigma=50, shape=n_periods)
            cohort_effect = pm.Normal("cohort_effect", mu=0, sigma=50, shape=n_cohorts)
        # Linear predictor: grand mean plus the three additive APC effects.
        mu_linear = mu + age_effect[age_idx] + period_effect[period_idx] + cohort_effect[cohort_idx]
        sigma = pm.HalfCauchy("sigma", beta=50)
        # nu > 1 by construction; the posterior variable is named
        # "nu_minus_one" (downstream code re-adds the +1 when needed).
        nu = pm.Exponential("nu_minus_one", 1/29.0) + 1  # weak prior, mean about 30
        y_obs = pm.StudentT("y_obs", nu=nu, mu=mu_linear, sigma=sigma, observed=y)
        trace = pm.sample(
            draws=config.get("draws", 1000),
            tune=config.get("tune", 1000),
            chains=config.get("chains", 2),
            # When parallel, use n_threads cores (may be None → PyMC default);
            # otherwise fall back to a single core unless `cores` is set.
            cores=config.get("n_threads") if config.get("parallel") else config.get("cores", 1),
            return_inferencedata=True,
            random_seed=config.get("random_seed", 42),
        )
        # Draw and merge posterior predictive samples so downstream PPC
        # diagnostics find them on the returned InferenceData.
        pp = pm.sample_posterior_predictive(trace, model=apc_model, return_inferencedata=True)
        trace = _attach_posterior_predictive(trace, pp)
    return trace


def add_pointwise_log_likelihood(idata, data, obs_name='y_obs'):
    """Compute and attach the pointwise log_likelihood for observed y (y_obs).

    Works for models that expose posterior variables named like in this project
    (mu, age_effect, period_effect, cohort_effect, sigma, and optional betas).

    Parameters
    ----------
    idata : InferenceData
        Must have a ``posterior`` group; modified in place by adding a
        ``log_likelihood`` group when one is missing.
    data : pandas.DataFrame
        Observations; must contain ``income``, ``age_group`` (categorical),
        ``period_code`` and ``cohort_code``; covariate columns are optional.
    obs_name : str
        Variable name for the log-likelihood entry (default ``'y_obs'``).

    Returns
    -------
    The same InferenceData, with ``log_likelihood[obs_name]`` of dims
    ``(chain, draw, obs)``.
    """
    # skip if already present
    if hasattr(idata, 'log_likelihood') and getattr(idata, 'log_likelihood') is not None:
        return idata

    posterior = idata.posterior
    chains, draws = posterior.sizes['chain'], posterior.sizes['draw']
    y = data['income'].values
    n_obs = len(y)

    # helper to get var if exists
    def get_var(name):
        return posterior[name].values if name in posterior.data_vars else None

    mu = get_var('mu')  # (chain, draw)
    age_eff = get_var('age_effect')  # (chain, draw, n_ages)
    period_eff = get_var('period_effect')
    cohort_eff = get_var('cohort_effect')
    sigma = get_var('sigma')
    # optional covariates
    beta_gender = get_var('beta_gender')
    beta_region = get_var('beta_region')
    beta_education = get_var('beta_education')
    beta_age_edu = get_var('beta_age_edu')

    # index arrays from data
    age_idx = data['age_group'].cat.codes.values
    period_idx = data['period_code'].values
    cohort_idx = data['cohort_code'].values
    gender = data['gender_code'].values if 'gender_code' in data.columns else None
    region = data['region_code'].values if 'region_code' in data.columns else None
    education = data['education'].values if 'education' in data.columns else None

    # FIX: the previous implementation looped in Python over every
    # (chain, draw) pair with one scipy call per draw — O(chains*draws)
    # interpreter-level work. Build the linear predictor for all draws at
    # once via NumPy broadcasting instead; the math is unchanged.
    lin = np.zeros((chains, draws, n_obs))
    if mu is not None:
        lin += mu[:, :, None]
    if age_eff is not None:
        lin += age_eff[:, :, age_idx]
    if period_eff is not None:
        lin += period_eff[:, :, period_idx]
    if cohort_eff is not None:
        lin += cohort_eff[:, :, cohort_idx]
    if beta_gender is not None and gender is not None:
        lin += beta_gender[:, :, None] * gender
    if beta_region is not None and region is not None:
        lin += beta_region[:, :, None] * region
    if beta_education is not None and education is not None:
        lin += beta_education[:, :, None] * education
    if beta_age_edu is not None and education is not None:
        lin += beta_age_edu[:, :, None] * (age_idx * education)

    # Per-draw scale, broadcast over observations; default to unit scale.
    scale = sigma[:, :, None] if sigma is not None else np.ones((chains, draws, 1))

    # compute logpdf: Student-t if a nu variable is present, else normal
    if 'nu_minus_one' in posterior.data_vars or 'nu' in posterior.data_vars:
        if 'nu_minus_one' in posterior.data_vars:
            # the sampled variable is nu - 1; recover nu
            nu = posterior['nu_minus_one'].values + 1.0
        else:
            nu = posterior['nu'].values
        loglik = stats.t.logpdf(y, df=nu[:, :, None], loc=lin, scale=scale)
    else:
        # normal logpdf, written out to avoid a per-draw scipy call
        var = scale ** 2
        loglik = -0.5 * (np.log(2 * np.pi * var) + ((y - lin) ** 2) / var)

    # attach as xarray DataArray
    da = xr.DataArray(loglik, dims=('chain', 'draw', 'obs'))
    ds = xr.Dataset({obs_name: da})
    idata.add_groups({'log_likelihood': ds})
    return idata


def main():
    """Run the full robustness pipeline: fit every model variant, compute
    PSIS-LOO comparisons (unless --fast), and write summary artifacts."""
    import json

    # FIX: close the config file handle via a context manager
    with open('config.yaml') as fh:
        cfg = yaml.safe_load(fh)
    # generate synthetic data (or replace with real data loading)
    data = generate_apc_data(cfg)
    data = preprocess_data(data)

    # 1) APC (original)
    trace_apc = run_and_save('trace_apc', build_and_sample_apc_model, data, cfg)

    # 2) Full model.
    # build_and_sample_full_model expects the index arrays produced by the APC
    # build, so re-derive them inside a wrapper and forward them along.
    # (FIX: removed the old dead `... if False else None` line that was
    # immediately overwritten.)
    trace_full = run_and_save(
        'trace_full',
        lambda d, c: build_and_sample_full_model(d, *build_and_sample_apc_model(d, c)[1:], c),
        data, cfg,
    )

    # 3) Full with interaction
    trace_full_inter = run_and_save(
        'trace_full_inter',
        lambda d, c: build_and_sample_full_model_with_interaction(d, *build_and_sample_apc_model(d, c)[1:], c),
        data, cfg,
    )

    # 4) APC with log(income)
    data_log = data.copy()
    data_log['income'] = np.log1p(data_log['income'])
    data_log = preprocess_data(data_log)
    trace_apc_log = run_and_save('trace_apc_log', build_and_sample_apc_model, data_log, cfg)

    # 5) APC with Student-t
    trace_apc_t = run_and_save('trace_apc_studentt', build_apc_studentt, data, cfg)

    # 6) PSIS-LOO compare selected models
    print('\n--- Running PSIS-LOO model comparison ---')
    idatas = {
        'apc': trace_apc,
        'full': trace_full,
        'full_inter': trace_full_inter,
        'apc_log': trace_apc_log,
        'apc_studentt': trace_apc_t,
    }
    # compute loo and pareto-k stats
    loo_results = {}
    if args.fast:
        print('FAST MODE: skipping PSIS-LOO computation to save time')
    else:
        for name, idata in idatas.items():
            if idata is None:
                continue
            try:
                print(f'Computing loo for {name}...')
                loo = az.loo(idata, pointwise=True)
                pareto_k = np.asarray(loo.pareto_k)
                loo_results[name] = {
                    # FIX: az.loo returns an ELPDData object that is not
                    # float-castable; read elpd_loo (and avoid recomputing LOO)
                    'loo': float(loo.elpd_loo),
                    'pareto_k_mean': float(pareto_k.mean()),
                    'pareto_k_high_fraction': float((pareto_k > 0.7).mean()),
                }
            except Exception as e:
                print(f'LOO failed for {name}: {e}')
    # save loo results
    with open(os.path.join(OUT_DIR, 'loo_summary.json'), 'w') as f:
        json.dump(loo_results, f, indent=2)
    print('Saved LOO summary to outputs/loo_summary.json')

    # 7) az.compare using ic='loo' for models that have loo
    compare_map = {k: v for k, v in idatas.items() if v is not None}
    if args.fast:
        print('FAST MODE: skipping az.compare/model comparison (no LOO results)')
    else:
        try:
            cmp = az.compare(compare_map, ic='loo')
            cmp.to_csv(os.path.join(OUT_DIR, 'model_compare_loo.csv'))
            print('Saved model compare table to outputs/model_compare_loo.csv')
        except Exception as e:
            print(f'az.compare failed: {e}')

    # 8) simple markdown report
    with open(os.path.join(OUT_DIR, 'apc_integrity_report.md'), 'w') as f:
        f.write('# APC integrity report\n\n')
        f.write('Models run: apc, full, full_inter, apc_log, apc_studentt\n\n')
        if args.fast:
            f.write('FAST MODE: LOO and model comparison were skipped.\n')
        else:
            f.write('LOO summary (see outputs/loo_summary.json):\n')
            f.write(json.dumps(loo_results, indent=2))
    print('Saved basic report to outputs/apc_integrity_report.md')


# Script entry point: run the full pipeline when executed directly.
if __name__ == '__main__':
    main()
