from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def pca_analysis(city_df, score_fields, year_field, n_components=2):
    """
    Run a principal component analysis (PCA) over the given score fields.

    Standardizes the selected columns, fits a PCA, prints the component
    loading matrix and the explained-variance table, and returns the
    per-row component scores together with the loadings.

    Parameters
    ----------
    city_df : pandas.DataFrame
        Input data. Rows with NaN in any score field are dropped first.
    score_fields : list of str
        Columns to feed into the PCA.
    year_field : str
        Column re-attached (index-aligned) to the score table.
    n_components : int, default 2
        Number of principal components to extract (was previously fixed
        at 2; the default preserves that behavior).

    Returns
    -------
    tuple of (pandas.DataFrame, pandas.DataFrame)
        ``df_pca`` — the retained score rows plus PC1..PCn score columns
        and carried-over metadata columns; ``loadings`` — the
        fields-by-components loading matrix.
    """
    print("\n【主成分分析（PCA）】")
    pca_data = city_df[score_fields].dropna()
    scaler = StandardScaler()
    X_std = scaler.fit_transform(pca_data)
    pca = PCA(n_components=n_components)
    X_pca = pca.fit_transform(X_std)
    # Name components after the number actually fitted (may be < requested).
    pc_names = [f"PC{i+1}" for i in range(X_pca.shape[1])]
    df_pca = pca_data.copy()
    for i, pc in enumerate(pc_names):
        df_pca[pc] = X_pca[:, i]
    # Re-attach metadata aligned to the rows that survived dropna().
    df_pca[year_field] = city_df.loc[pca_data.index, year_field].values
    if "出生队列分组" in city_df.columns:
        df_pca["出生队列分组"] = city_df.loc[pca_data.index, "出生队列分组"].values
    if "性别" in city_df.columns:
        df_pca["性别"] = city_df.loc[pca_data.index, "性别"].values

    # Component loadings (fields x components).
    loadings = pd.DataFrame(
        pca.components_.T,
        index=score_fields,
        columns=pc_names,
    )
    print("主成分载荷（Component Loadings）:")
    print(tabulate(loadings, headers='keys', tablefmt='github', floatfmt=".3f"))

    # Explained variance ratio per component.
    print("主成分方差解释比例（Explained Variance Ratio）:")
    var_table = [[f"PC{i+1}", f"{ratio:.2%}"] for i, ratio in enumerate(pca.explained_variance_ratio_)]
    print(tabulate(var_table, headers=["主成分", "方差解释比例"], tablefmt='github'))
    return df_pca, loadings
"""
核心功能函数模块
- 仅实现与 config.yaml 参数解耦的核心逻辑
"""

import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tabulate import tabulate
import json

def ensure_dir(path):
    """Create *path* (including missing parents) if it does not exist.

    Uses ``exist_ok=True`` instead of an exists-then-create pair, which
    removes the race where another process creates the directory between
    the check and the ``makedirs`` call; also creates intermediate
    directories, matching the original ``os.makedirs`` behavior.
    """
    os.makedirs(path, exist_ok=True)

def save_figure(fig, filename, fig_dir="figures"):
    """Persist a matplotlib figure as fig_dir/filename at 300 dpi.

    The output directory is created on demand; returns the saved path.
    """
    out_path = os.path.join(fig_dir, filename)
    ensure_dir(fig_dir)
    fig.savefig(out_path, dpi=300, bbox_inches='tight')
    print(f"图片已保存：{out_path}")
    return out_path

def show_all_images(image_paths):
    """Open each image with the system viewer; report failures instead of raising."""
    for path in image_paths:
        try:
            Image.open(path).show()
        except Exception as e:
            print(f"图片 {path} 打开失败：{e}")


def get_mean_table(df, score_fields, year_field):
    """Return the per-year mean of each score field as a DataFrame."""
    by_year = df.groupby(year_field)
    return by_year[score_fields].mean()

def print_mean_table(mean_table):
    """Pretty-print the year-by-field mean table to stdout."""
    print("\n【各年份四项计分均值】")
    rendered = tabulate(mean_table, headers='keys', tablefmt='github', floatfmt=".2f")
    print(rendered)

def get_box_stats(df, score_fields, year_field):
    """Per-year descriptive statistics (box-plot style) for each score field.

    Returns a dict mapping field name -> DataFrame of count/mean/std/
    min/quartiles/max per year group.
    """
    summary_cols = ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]
    by_year = df.groupby(year_field)
    return {field: by_year[field].describe()[summary_cols] for field in score_fields}

def print_box_stats(stats):
    """Print the per-field descriptive tables produced by get_box_stats."""
    print("\n【各年份四项计分分布（箱线统计）】")
    for field, table in stats.items():
        print(f"\n字段：{field}")
        print(tabulate(table, headers='keys', tablefmt='github', floatfmt=".2f"))

def anova_by_year(df, score_fields, year_field):
    """One-way ANOVA of each score field across the year groups.

    Returns a list of [field, F, p] rows with F/p pre-formatted as
    strings; any field whose year groups include one with fewer than two
    observations yields a "数据不足" (insufficient data) marker instead.
    """
    from scipy.stats import f_oneway
    rows = []
    for field in score_fields:
        samples = [grp.dropna().values for _, grp in df.groupby(year_field)[field]]
        if any(len(s) <= 1 for s in samples):
            rows.append([field, "数据不足", "数据不足"])
            continue
        f_stat, p_val = f_oneway(*samples)
        rows.append([field, f"{f_stat:.2f}", f"{p_val:.4f}"])
    return rows

def print_anova_results(results):
    """Render the ANOVA result rows as a github-style table."""
    print("\n【方差分析（ANOVA）结果】")
    rendered = tabulate(results, headers=["字段", "F值", "p值"], tablefmt='github')
    print(rendered)

def load_field_stats(json_path):
    """Load the "字段统计信息" section from a JSON statistics file.

    Best-effort loader: returns {} when the path is missing/empty/not a
    string, the file cannot be read or parsed, or the parsed payload
    lacks the expected top-level key.
    """
    if pd.notnull(json_path) and isinstance(json_path, str) and json_path:
        try:
            with open(json_path, 'r', encoding='utf-8') as fh:
                payload = json.load(fh)
        except Exception as e:
            print(f"加载字段统计信息文件失败：{e}")
        else:
            if isinstance(payload, dict) and "字段统计信息" in payload:
                return payload["字段统计信息"]
    return {}

def load_and_filter_city(csv_path, city_col="城乡", city_value="城市"):
    """Read a CSV and keep only rows where *city_col* equals *city_value*.

    Values are stringified and whitespace-stripped before comparison.
    Returns the filtered copy, or None when the file cannot be read or
    the column is absent.
    """
    try:
        df = pd.read_csv(csv_path, low_memory=False)
    except Exception as e:
        print(f"CSV文件读取失败：{e}")
        return None
    print(f"CSV文件 {csv_path} 读取成功，数据量：{df.shape[0]} 行，{df.shape[1]} 列。")
    if city_col not in df.columns:
        print(f"未找到 {city_col} 字段，无法筛选城市样本。")
        return None
    is_city = df[city_col].astype(str).str.strip() == city_value
    city_df = df[is_city].copy()
    print(f"城市样本筛选后行数: {len(city_df)}")
    return city_df

def get_missing_and_unique_stats(df):
    """Return (missing_rate, unique_counts) Series for every column of *df*.

    missing_rate is the per-column fraction of null entries; unique_counts
    is the per-column number of distinct non-null values.
    """
    n_rows = len(df)
    unique_counts = df.nunique()
    missing_rate = df.isnull().sum() / n_rows
    return missing_rate, unique_counts
