def print_pca_stats(df_pca, year_field):
    """
    Print principal-component score statistics: PC1/PC2 means per year,
    per-year distribution summaries, and PC1 means by gender when the
    gender column is present.
    """
    grouped = df_pca.groupby(year_field)
    print("\nPC1均值随年份：")
    print(tabulate(grouped["PC1"].mean().reset_index(), headers=['年份', 'PC1均值'], tablefmt='github', floatfmt=".3f"))
    print("PC2均值随年份：")
    print(tabulate(grouped["PC2"].mean().reset_index(), headers=['年份', 'PC2均值'], tablefmt='github', floatfmt=".3f"))

    print("\n【PC1/PC2分布（箱线统计）】")
    summary_cols = ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]
    for component in ("PC1", "PC2"):
        summary = grouped[component].describe()[summary_cols]
        print(f"\n主成分：{component}")
        print(tabulate(summary, headers='keys', tablefmt='github', floatfmt=".3f"))

    if "性别" in df_pca.columns:
        print("\n【PC1均值随年份变化（按性别）】")
        by_gender = df_pca.groupby([year_field, "性别"])["PC1"].mean().unstack()
        print(tabulate(by_gender, headers='keys', tablefmt='github', floatfmt=".3f"))

def print_cohort_stats(city_df, df_pca, score_fields, year_field):
    """
    Descriptive statistics and significance tests for birth-cohort groups
    ("出生队列分组") within the urban sample, including two-way ANOVA
    (cohort x year) on each score field and on PC1 when PCA results exist.
    """
    print("\n【城市样本 cohort（出生队列分组）描述性统计】")
    if "出生队列分组" not in city_df.columns:
        print("未找到 cohort 字段，无法进行 cohort 相关分析。")
        return

    cohort_field = "出生队列分组"
    # PCA results are usable only when non-empty and carrying the cohort column.
    pca_available = (not df_pca.empty) and cohort_field in df_pca.columns

    print("\n【各cohort四项计分均值（城市样本）】")
    print(tabulate(city_df.groupby(cohort_field)[score_fields].mean(),
                   headers='keys', tablefmt='github', floatfmt=".2f"))

    if pca_available:
        pc1_by_cohort = df_pca.groupby(cohort_field)["PC1"].mean()
        print("\n【各cohort主成分PC1均值（城市样本）】")
        print(tabulate(pc1_by_cohort.reset_index(), headers=[cohort_field, 'PC1均值'],
                       tablefmt='github', floatfmt=".3f"))

    print("\n【各cohort在不同年份的四项计分均值（城市样本）】")
    print(tabulate(city_df.groupby([cohort_field, year_field])[score_fields].mean(),
                   headers='keys', tablefmt='github', floatfmt=".2f"))

    if pca_available:
        pc1_by_cohort_year = df_pca.groupby([cohort_field, year_field])["PC1"].mean().unstack()
        print("\n【各cohort主成分PC1均值随年份（城市样本）】")
        print(tabulate(pc1_by_cohort_year, headers='keys', tablefmt='github', floatfmt=".3f"))

    # Two-way ANOVA; statsmodels is imported lazily, as in the rest of the module.
    import statsmodels.api as sm
    from statsmodels.formula.api import ols
    print("\n【cohort × 年份 双因素方差分析（Two-way ANOVA）】")
    for field in score_fields:
        formula = f'Q("{field}") ~ C(Q("{cohort_field}")) + C(Q("{year_field}")) + C(Q("{cohort_field}")):C(Q("{year_field}"))'
        fitted = ols(formula, data=city_df).fit()
        print(f"\n字段：{field}")
        print(tabulate(sm.stats.anova_lm(fitted, typ=2), headers='keys', tablefmt='github', floatfmt=".4f"))

    if pca_available:
        formula = f'PC1 ~ C(Q("{cohort_field}")) + C(Q("{year_field}")) + C(Q("{cohort_field}")):C(Q("{year_field}"))'
        fitted = ols(formula, data=df_pca).fit()
        print("\n【主成分PC1的双因素方差分析】")
        print(tabulate(sm.stats.anova_lm(fitted, typ=2), headers='keys', tablefmt='github', floatfmt=".4f"))

    print("\n【cohort × 年份 分组样本量】")
    print(city_df.groupby([cohort_field, year_field]).size())
"""
核心功能函数模块
- 仅实现与 config.yaml 参数解耦的核心逻辑
"""

import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tabulate import tabulate
import json

from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def pca_analysis(city_df, score_fields, year_field):
    """
    Run a two-component PCA on the standardized score fields.

    city_df: DataFrame holding the score columns and the year field
    score_fields: list of score column names to analyze
    year_field: str, name of the survey-year column

    Returns (df_pca, loadings): the component-score table (PC1/PC2 plus
    carried-over year / cohort / gender columns) and the loading matrix.
    Also prints the loadings and explained-variance ratios.
    """
    print("\n【主成分分析（PCA）】")
    complete_rows = city_df[score_fields].dropna()
    standardized = StandardScaler().fit_transform(complete_rows)
    pca = PCA(n_components=2)
    scores = pca.fit_transform(standardized)

    df_pca = complete_rows.copy()
    df_pca["PC1"] = scores[:, 0]
    df_pca["PC2"] = scores[:, 1]
    # Carry auxiliary columns over, aligned to the rows that survived dropna().
    df_pca[year_field] = city_df.loc[complete_rows.index, year_field].values
    for extra_col in ("出生队列分组", "性别"):
        if extra_col in city_df.columns:
            df_pca[extra_col] = city_df.loc[complete_rows.index, extra_col].values

    # Component loadings (variables x components).
    loadings = pd.DataFrame(pca.components_.T, index=score_fields, columns=["PC1", "PC2"])
    print("主成分载荷（Component Loadings）:")
    print(tabulate(loadings, headers='keys', tablefmt='github', floatfmt=".3f"))

    print("主成分方差解释比例（Explained Variance Ratio）:")
    var_rows = [[f"PC{i+1}", f"{ratio:.2%}"] for i, ratio in enumerate(pca.explained_variance_ratio_)]
    print(tabulate(var_rows, headers=["主成分", "方差解释比例"], tablefmt='github'))
    return df_pca, loadings
def ensure_dir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of an exists()-then-makedirs() pair,
    which had a TOCTOU race: a concurrent creator between the check and
    the call made makedirs raise FileExistsError.
    """
    os.makedirs(path, exist_ok=True)

def save_figure(fig, filename, fig_dir="figures"):
    """Save *fig* into *fig_dir* (created on demand) at 300 dpi and return the path."""
    ensure_dir(fig_dir)
    target = os.path.join(fig_dir, filename)
    fig.savefig(target, dpi=300, bbox_inches='tight')
    print(f"图片已保存：{target}")
    return target

def show_all_images(image_paths):
    """Open and display every image in *image_paths*; report (don't raise) failures."""
    for path in image_paths:
        try:
            Image.open(path).show()
        except Exception as e:
            print(f"图片 {path} 打开失败：{e}")


def get_mean_table(df, score_fields, year_field):
    """Return the mean of each score field within each year group."""
    grouped = df.groupby(year_field)
    return grouped[score_fields].agg("mean")

def print_mean_table(mean_table):
    """Pretty-print the per-year score means as a GitHub-style table."""
    print("\n【各年份四项计分均值】")
    table_text = tabulate(mean_table, headers='keys', tablefmt='github', floatfmt=".2f")
    print(table_text)

def get_box_stats(df, score_fields, year_field):
    """Return per-year describe() summaries (box-plot statistics) keyed by field."""
    summary_cols = ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]
    grouped = df.groupby(year_field)
    return {
        field: grouped[field].describe()[summary_cols]
        for field in score_fields
    }

def print_box_stats(stats):
    """Print each field's per-year distribution summary table."""
    print("\n【各年份四项计分分布（箱线统计）】")
    for field in stats:
        print(f"\n字段：{field}")
        print(tabulate(stats[field], headers='keys', tablefmt='github', floatfmt=".2f"))

def anova_by_year(df, score_fields, year_field):
    """One-way ANOVA of each score field across year groups.

    Returns a list of ``[field, F-value, p-value]`` rows with the numbers
    pre-formatted as strings; fields lacking sufficient data get the
    "数据不足" placeholder instead.

    Fix: ``f_oneway`` requires at least two groups, but the original's
    ``all(len(g) > 1 for g in groups)`` is vacuously True for zero or one
    group, so a single-year frame crashed. Guard ``len(groups) >= 2``.
    """
    from scipy.stats import f_oneway
    results = []
    for field in score_fields:
        groups = [g.dropna().values for _, g in df.groupby(year_field)[field]]
        # f_oneway needs >= 2 groups, each with >= 2 observations.
        if len(groups) >= 2 and all(len(g) > 1 for g in groups):
            fval, pval = f_oneway(*groups)
            results.append([field, f"{fval:.2f}", f"{pval:.4f}"])
        else:
            results.append([field, "数据不足", "数据不足"])
    return results

def print_anova_results(results):
    """Print the ANOVA result rows as a field / F / p table."""
    print("\n【方差分析（ANOVA）结果】")
    rendered = tabulate(results, headers=["字段", "F值", "p值"], tablefmt='github')
    print(rendered)

def load_field_stats(json_path):
    """Load the "字段统计信息" section from a JSON file; return {} on any failure."""
    # Guard: path must be a non-null, non-empty string.
    if not (pd.notnull(json_path) and isinstance(json_path, str) and json_path):
        return {}
    try:
        with open(json_path, 'r', encoding='utf-8') as f:
            payload = json.load(f)
    except Exception as e:
        print(f"加载字段统计信息文件失败：{e}")
        return {}
    if isinstance(payload, dict) and "字段统计信息" in payload:
        return payload["字段统计信息"]
    return {}

def load_and_filter_city(csv_path, city_col="城乡", city_value="城市"):
    """Read a CSV and keep only rows whose *city_col* (stripped) equals *city_value*.

    Returns the filtered copy, or None when the file is unreadable or the
    column is missing.
    """
    try:
        df = pd.read_csv(csv_path, low_memory=False)
    except Exception as e:
        print(f"CSV文件读取失败：{e}")
        return None
    print(f"CSV文件 {csv_path} 读取成功，数据量：{df.shape[0]} 行，{df.shape[1]} 列。")
    if city_col not in df.columns:
        print(f"未找到 {city_col} 字段，无法筛选城市样本。")
        return None
    # Cast to str and strip so padded / non-string cells still match.
    is_city = df[city_col].astype(str).str.strip() == city_value
    city_df = df[is_city].copy()
    print(f"城市样本筛选后行数: {len(city_df)}")
    return city_df

def get_missing_and_unique_stats(df):
    """Return (missing-rate Series, unique-count Series) for every column of *df*."""
    row_count = len(df)
    missing_rate = df.isnull().sum() / row_count
    unique_counts = df.nunique()
    return missing_rate, unique_counts
