import os
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from sklearn.covariance import GraphicalLasso, LedoitWolf
from sklearn.preprocessing import StandardScaler
import scipy.stats
from scipy.stats import f_oneway, kruskal
import time
import seaborn as sns
import yaml

# Load the project configuration (variable-name mappings) from the repo-level
# config.yaml, resolved relative to this file so the working directory does
# not matter.
with open(os.path.join(os.path.dirname(__file__), '../config.yaml'), encoding='utf-8') as f:
    config = yaml.safe_load(f)
# Raw variable name -> Chinese display name, for core and control variables.
CORE_NAME_MAP = config.get('core_name_map_mgm', {})
CONTROL_NAME_MAP = config.get('control_name_map', {})

# Additional MGM-analysis helpers and the main workflow can be added below.
def preprocess_data(df, fields_core, control_vars):
    """Subset, encode and impute the modeling columns.

    Keeps only ``fields_core + control_vars``, reduces the year column
    ("年份") to its four-digit numeric value, integer-encodes every
    non-numeric column except the year, and median-imputes missing values
    in the numeric columns.
    """
    work = df[fields_core + control_vars].copy()
    # Keep only the 4-digit year from the raw year strings.
    if "年份" in work.columns:
        work["年份"] = work["年份"].astype(str).str.extract(r'(\d{4})').astype(float)
    # Integer-encode categorical/object columns; the year column stays numeric.
    for column in work.columns:
        is_categorical = work[column].dtype == 'object' or work[column].dtype.name == 'category'
        if column != "年份" and is_categorical:
            work[column] = pd.Categorical(work[column]).codes
    # Impute missing values in numeric columns only, using the column median.
    numeric_cols = work.select_dtypes(include=[np.number]).columns
    work[numeric_cols] = work[numeric_cols].fillna(work[numeric_cols].median())
    return work

def run_mgm(df, fields_core, control_vars, alpha=0.2, max_iter=1000):
    """Fit a Graphical Lasso on the standardized variables.

    Returns the element-wise absolute precision matrix (used as an adjacency
    matrix downstream) together with the ordered variable names it indexes.
    """
    variables = fields_core + control_vars
    # Standardize so the L1 penalty treats every variable on the same scale.
    matrix = StandardScaler().fit_transform(df[variables].values)
    estimator = GraphicalLasso(alpha=alpha, max_iter=max_iter)
    estimator.fit(matrix)
    return np.abs(estimator.precision_), variables

def run_ledoitwolf(df, fields_core, control_vars):
    """Fit a Ledoit-Wolf shrinkage covariance estimator.

    Returns the element-wise absolute covariance matrix and the ordered
    variable names it indexes.
    """
    variables = fields_core + control_vars
    estimator = LedoitWolf().fit(df[variables].values)
    return np.abs(estimator.covariance_), variables

def get_chinese_name(var):
    """Map a raw variable name to its Chinese display name.

    The core-variable mapping takes precedence over the control-variable
    mapping; unknown names fall back to the raw name itself.
    """
    if var in CORE_NAME_MAP:
        return CORE_NAME_MAP[var]
    return CONTROL_NAME_MAP.get(var, var)

def build_network_from_adj(adj, var_names, df, threshold=0.10):
    """Build an undirected network from a precision/covariance matrix.

    Nodes are the Chinese display names of the variables; the district
    variable ("区县") participates in modeling but is excluded from the
    graph.  An edge (i, j) is kept only when |adj[i, j]| > ``threshold``
    AND the Pearson correlation of the two observed columns is significant
    (p < 0.05).  Edge attributes: ``weight`` (adjacency value) and ``pval``.

    Parameters
    ----------
    adj : square ndarray aligned with ``var_names``
    var_names : list of raw variable names
    df : DataFrame holding the observed columns for the correlation test
    threshold : minimum |weight| for an edge to be kept

    Returns
    -------
    networkx.Graph
    """
    G = nx.Graph()
    # Consistency fix: reuse the shared helper instead of duplicating the
    # two-level name-mapping logic inline.
    name_map = {v: get_chinese_name(v) for v in var_names}
    for raw in var_names:
        if name_map[raw] != "区县":  # district is modeling-only, never a node
            G.add_node(name_map[raw])
    for i in range(len(var_names)):
        for j in range(i + 1, len(var_names)):
            # Cheap rejections first: district endpoints and weak weights do
            # not need the correlation test at all.
            if name_map[var_names[i]] == "区县" or name_map[var_names[j]] == "区县":
                continue
            weight = adj[i, j]
            if abs(weight) <= threshold:
                continue
            # NOTE(review): pearsonr assumes non-constant columns — upstream
            # filter_vars is expected to have dropped zero-variance variables.
            corr, pval = scipy.stats.pearsonr(df[var_names[i]], df[var_names[j]])
            if pval < 0.05:
                G.add_edge(name_map[var_names[i]], name_map[var_names[j]],
                           weight=weight, pval=pval)
    return G

def get_network_annotation(params):
    """Compose the multi-line Chinese legend text shown under each network plot."""
    legend_lines = [
        "节点大小：介数中心性",
        "红线：正相关，蓝线：负相关",
        f"仅显示统计显著（p<0.05），边权重阈值：{params['edge_threshold']}",
        f"模型正则化参数 alpha：{params['mgm_alpha']}",
        f"剔除唯一值≤{params['unique_min']}、方差<{params['std_min']}的变量",
        "区县仅参与建模，不在图中显示",
    ]
    return "\n".join(legend_lines)

def filter_vars(df, var_names, unique_min, std_min):
    """Drop near-constant variables before network estimation.

    A column is dropped when its unique-value count is <= ``unique_min`` or
    (numeric columns only) its standard deviation is < ``std_min``.

    Returns (filtered frame, surviving variable names, dropped column names).
    """
    unique_counts = df.nunique()
    # Standard deviation is only meaningful for numeric columns.
    numeric_stds = df.select_dtypes(include=[np.number]).std()
    low_unique = unique_counts[unique_counts <= unique_min].index.tolist()
    low_std = numeric_stds[numeric_stds < std_min].index.tolist()
    drop_vars = list(set(low_unique + low_std))
    filtered = df.drop(columns=drop_vars)
    surviving = [v for v in var_names if v not in drop_vars]
    return filtered, surviving, drop_vars

def get_node_metrics(G):
    """Return per-node degree, betweenness, eigenvector and closeness centrality.

    The result is a dict keyed by metric name; each value maps node -> score.
    """
    metrics = {"degree": dict(G.degree())}
    metrics["betweenness"] = nx.betweenness_centrality(G)
    # NOTE(review): eigenvector_centrality can fail to converge on some
    # graphs — confirm the estimated networks stay well-conditioned.
    metrics["eigenvector"] = nx.eigenvector_centrality(G)
    metrics["closeness"] = nx.closeness_centrality(G)
    return metrics

def print_node_metrics(G):
    """Print a tab-separated table of per-node centrality metrics."""
    metrics = get_node_metrics(G)
    print("\n节点指标：")
    print("变量\t度数\t介数中心性\t特征向量中心性\t接近中心性")
    for node in G.nodes():
        row = (
            f"{node}\t{metrics['degree'][node]}"
            f"\t{metrics['betweenness'][node]:.3f}"
            f"\t{metrics['eigenvector'][node]:.3f}"
            f"\t{metrics['closeness'][node]:.3f}"
        )
        print(row)

def draw_network(G, title, save_path, ch_font, params, node_color='skyblue'):
    """Render one network to ``save_path`` with the standard legend.

    Visual encoding: edge width = |weight|, edge color = sign (red positive,
    blue negative), node size = betweenness centrality.
    """
    layout = nx.spring_layout(G, seed=42)  # fixed seed -> reproducible layout
    edge_data = G.edges(data=True)
    widths = [max(abs(attrs['weight']) * 20, 2) for _, _, attrs in edge_data]
    colors = ['red' if attrs['weight'] > 0 else 'blue' for _, _, attrs in edge_data]
    betweenness = nx.betweenness_centrality(G)
    node_sizes = [betweenness[node] * 2000 + 800 for node in G.nodes()]
    fig = plt.figure(figsize=(8, 6))
    nx.draw_networkx_nodes(G, layout, node_color=node_color, node_size=node_sizes,
                           edgecolors='white', linewidths=2)
    nx.draw_networkx_labels(G, layout, font_size=6, font_family=ch_font.get_name())
    nx.draw_networkx_edges(G, layout, width=widths, edge_color=colors, style='solid')
    edge_labels = {(u, v): f"{attrs['weight']:.2f}" for u, v, attrs in edge_data}
    nx.draw_networkx_edge_labels(G, layout, edge_labels=edge_labels, font_size=4)
    plt.title(title, fontproperties=ch_font, fontsize=14)
    plt.axis('off')
    plt.text(0.01, 0.01, get_network_annotation(params), transform=fig.transFigure,
             fontsize=10, color='gray', verticalalignment='bottom',
             fontproperties=ch_font)
    fig.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close(fig)

def draw_network_subplot(G, ax, ch_font, params, node_color='skyblue', title=None):
    """Draw one network onto an existing Axes (used by multi-panel figures).

    Same visual encoding as ``draw_network``: edge width = |weight|, edge
    color = sign, node size = betweenness centrality.
    """
    layout = nx.spring_layout(G, seed=42)  # fixed seed -> reproducible layout
    edge_data = G.edges(data=True)
    widths = [max(abs(attrs['weight']) * 20, 2) for _, _, attrs in edge_data]
    colors = ['red' if attrs['weight'] > 0 else 'blue' for _, _, attrs in edge_data]
    betweenness = nx.betweenness_centrality(G)
    node_sizes = [betweenness[node] * 2000 + 800 for node in G.nodes()]
    nx.draw_networkx_nodes(G, layout, ax=ax, node_color=node_color,
                           node_size=node_sizes, edgecolors='white', linewidths=2)
    nx.draw_networkx_labels(G, layout, ax=ax, font_size=6, font_family=ch_font.get_name())
    nx.draw_networkx_edges(G, layout, ax=ax, width=widths, edge_color=colors, style='solid')
    edge_labels = {(u, v): f"{attrs['weight']:.2f}" for u, v, attrs in edge_data}
    nx.draw_networkx_edge_labels(G, layout, edge_labels=edge_labels, ax=ax, font_size=4)
    if title:
        ax.set_title(title, fontproperties=ch_font, fontsize=14)
    ax.axis('off')

def draw_multi_year_networks(df_net, fields_core, control_vars, ch_font, save_path, params):
    """Estimate and draw one network per survey year, side by side.

    For every distinct year in ``df_net`` this filters near-constant
    variables, fits a Graphical Lasso network and draws it on its own
    subplot, then prints per-year node metrics and compares structure and
    edge changes across years.

    Returns
    -------
    (node_metrics_all, year_networks) : node_metrics_all is a list of
    (year, metrics-dict) pairs (see ``get_node_metrics``); year_networks is
    a list of (year, Graph) pairs for the successfully modeled years.
    """
    years = sorted(df_net["年份"].dropna().unique())
    fig, axes = plt.subplots(1, len(years), figsize=(8 * len(years), 6))
    if len(years) == 1:
        axes = [axes]  # plt.subplots returns a bare Axes for a single column
    node_metrics_all = []
    control_vars_no_year_quxian = [v for v in control_vars if v != "年份"]
    year_networks = []  # (year, Graph) for every year that was modeled
    for idx, year in enumerate(years):
        df_year = df_net[df_net["年份"] == year].copy()
        var_names = fields_core + [v for v in control_vars_no_year_quxian if v not in fields_core]
        df_year, var_names, _ = filter_vars(df_year, var_names, params['unique_min'], params['std_min'])
        # NOTE(review): variables dropped for one year stay dropped for all
        # later years because these lists are narrowed in place — confirm
        # this cumulative filtering is intended.
        fields_core = [v for v in fields_core if v in var_names]
        control_vars_no_year_quxian = [v for v in control_vars_no_year_quxian if v in var_names]
        try:
            adj, _ = run_mgm(df_year, fields_core, control_vars_no_year_quxian,
                             alpha=params['mgm_alpha'], max_iter=5000)
            G = build_network_from_adj(adj, var_names, df_year, threshold=params['edge_threshold'])
        except Exception as e:
            print(f"{year}年数据建模失败：{e}")
            continue
        draw_network_subplot(G, axes[idx], ch_font, params, node_color='skyblue',
                             title=f"{int(year)}年")
        node_metrics_all.append((year, get_node_metrics(G)))
        year_networks.append((year, G))
    # One shared legend for the whole figure.
    fig.text(0.01, 0.01, get_network_annotation(params),
             fontsize=10, color='gray', verticalalignment='bottom', fontproperties=ch_font)
    plt.tight_layout(rect=[0, 0.03, 1, 1])
    fig.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close(fig)
    for year, metrics in node_metrics_all:
        print(f"\n{year}年节点指标：")
        # Bug fix: metrics is keyed by metric name ("degree", ...), so the
        # old pd.DataFrame(metrics, columns=[Chinese headers]) selected
        # non-existent columns and printed an empty table.  Build one row
        # per node explicitly instead.
        rows = [
            {
                "变量": node,
                "度数": metrics["degree"][node],
                "介数中心性": metrics["betweenness"][node],
                "特征向量中心性": metrics["eigenvector"][node],
                "接近中心性": metrics["closeness"][node],
            }
            for node in metrics["degree"]
        ]
        print(pd.DataFrame(rows).to_string(index=False))

    # Cross-year structural comparison.
    compare_network_structure(year_networks)
    compare_edge_changes(year_networks)
    return node_metrics_all, year_networks

def compare_network_structure(year_networks):
    """Print a year-by-year comparison of global network structure.

    For each (year, Graph) pair reports node/edge counts, connectivity,
    density, average path length of the largest connected component,
    clustering, isolated nodes and the top-3 "core" nodes by betweenness
    centrality, then summarizes how the core and isolated nodes change
    across years.  Returns None (output goes to stdout).
    """
    summary = []
    for year, G in year_networks:
        n_components = nx.number_connected_components(G)
        # Bug fix: max() over connected_components raises ValueError on an
        # empty graph — guard with an explicit node-count check.
        if G.number_of_nodes() > 0:
            largest_cc = max(nx.connected_components(G), key=len)
        else:
            largest_cc = set()
        largest_cc_size = len(largest_cc)
        # Isolated nodes (degree 0).
        isolated = list(nx.isolates(G))
        # "Core" nodes: top-3 by betweenness centrality.
        centrality = nx.betweenness_centrality(G)
        core_nodes = sorted(centrality.items(), key=lambda kv: kv[1], reverse=True)[:3]
        density = nx.density(G)
        # Average path length is only defined on a connected graph, so it is
        # computed on the largest component (NaN when that is trivial).
        if G.number_of_edges() > 0 and largest_cc_size > 1:
            avg_path_length = nx.average_shortest_path_length(G.subgraph(largest_cc))
        else:
            avg_path_length = float('nan')
        # Bug fix: average_clustering raises ZeroDivisionError on an empty
        # graph — report NaN instead.
        clustering = nx.average_clustering(G) if G.number_of_nodes() > 0 else float('nan')
        summary.append({
            "年份": int(year),
            "节点总数": G.number_of_nodes(),
            "边总数": G.number_of_edges(),
            "连通分量数": n_components,
            "最大分量节点数": largest_cc_size,
            "网络密度": round(density, 3),
            "平均路径长度": round(avg_path_length, 3) if not pd.isna(avg_path_length) else "NA",
            "平均聚类系数": round(clustering, 3),
            "孤立节点": ",".join(isolated),
            "核心节点": ",".join([n for n, _ in core_nodes])
        })
    df_summary = pd.DataFrame(summary)
    print("\n各年份网络结构对比（含整体指标）：")
    print(df_summary.to_string(index=False))

    # How the core nodes change over the years.
    print("\n各年份核心节点变化：")
    for row in summary:
        print(f"{row['年份']}年核心节点：{row['核心节点']}")

    # How the isolated nodes change over the years.
    print("\n各年份孤立节点变化：")
    for row in summary:
        print(f"{row['年份']}年孤立节点：{row['孤立节点']}")

def compare_edge_changes(year_networks):
    """Track which significant edges persist, appear or vanish across years.

    Given ``[(year, Graph), ...]``, prints three groups: edges present in
    every year, edges seen only in the latest year, and edges present in the
    previous year but absent from the latest one.
    """
    edge_years = {}
    years = []
    for year, G in year_networks:
        years.append(int(year))
        for u, v in G.edges():
            key = tuple(sorted((u, v)))
            edge_years.setdefault(key, []).append(int(year))

    stable_edges = [e for e, seen in edge_years.items() if len(seen) == len(years)]
    new_edges = [e for e, seen in edge_years.items()
                 if len(seen) == 1 and seen[-1] == years[-1]]
    disappeared_edges = []
    if len(years) > 1:
        previous_year = years[-2]
        disappeared_edges = [e for e, seen in edge_years.items()
                             if previous_year in seen and years[-1] not in seen]

    print("\n稳定出现的边（所有年份都存在）：")
    for e in stable_edges:
        print(e)
    print("\n仅最新年份出现的新边：")
    for e in new_edges:
        print(e)
    print("\n上一年份消失的边：")
    for e in disappeared_edges:
        print(e)

def bootstrap_diff(data1, data2, n_resamples=1000, random_state=None):
    """Pooled bootstrap test for a difference in means.

    Resamples the pooled data ``n_resamples`` times with replacement,
    splitting each resample at len(data1), and returns a tuple of
    (observed mean difference, 95% percentile CI of the resampled
    differences, two-sided bootstrap p-value).
    """
    sample_a = np.array(data1)
    sample_b = np.array(data2)
    observed = np.mean(sample_a) - np.mean(sample_b)
    pooled = np.concatenate([sample_a, sample_b])
    split = len(sample_a)
    rng = np.random.default_rng(random_state)
    diffs = []
    for _ in range(n_resamples):
        draw = rng.choice(pooled, size=pooled.shape[0], replace=True)
        diffs.append(np.mean(draw[:split]) - np.mean(draw[split:]))
    ci = np.percentile(diffs, [2.5, 97.5])
    p_value = np.mean(np.abs(diffs) >= np.abs(observed))
    return observed, ci, p_value

def test_node_centrality_diff(node_metrics_all, node_name):
    """
    检验某节点在不同年份的中心性指标（如介数中心性）是否有显著差异
    node_metrics_all: [(year, metrics)]，metrics为每个节点的指标列表
    node_name: 需要检验的节点名（如"幸福感"）
    """
    years = []
    centralities = []
    for year, metrics in node_metrics_all:
        df_metrics = pd.DataFrame(metrics, columns=["变量", "度数", "介数中心性", "特征向量中心性", "接近中心性"])
        val = df_metrics[df_metrics["变量"] == node_name]["介数中心性"].astype(float).values
        if len(val) > 0:
            years.append(int(year))
            centralities.append(val[0])
    if len(centralities) < 2:
        print(f"{node_name}在不同年份数据不足，无法检验。")
        return
    print(f"\n节点【{node_name}】各年份介数中心性：{dict(zip(years, centralities))}")
    # 两两年份做bootstrap检验
    for i in range(len(centralities)-1):
        diff, ci, p = bootstrap_diff([centralities[i]], [centralities[i+1]])
        print(f"{years[i]} vs {years[i+1]} 均值差异: {diff:.3f}, 95%CI: [{ci[0]:.3f}, {ci[1]:.3f}], p值: {p:.3f}")

def test_edge_weight_diff(year_networks, edge):
    """
    检验某边在不同年份的权重（相关性）是否有显著差异
    year_networks: [(year, G)]
    edge: 需要检验的边，如("幸福感", "年龄")
    """
    years = []
    weights = []
    for year, G in year_networks:
        if G.has_edge(*edge):
            w = G.get_edge_data(*edge)["weight"]
            years.append(int(year))
            weights.append(w)
    if len(weights) < 2:
        print(f"边{edge}在不同年份数据不足，无法检验。")
        return
    print(f"\n边{edge}各年份权重：{dict(zip(years, weights))}")
    # 两两年份做bootstrap检验
    for i in range(len(weights)-1):
        diff, ci, p = bootstrap_diff([weights[i]], [weights[i+1]])
        print(f"{years[i]} vs {years[i+1]} 均值差异: {diff:.3f}, 95%CI: [{ci[0]:.3f}, {ci[1]:.3f}], p值: {p:.3f}")


def add_age_group(df):
    """Add a categorical "年龄组" (age group) column derived from "年龄" (age).

    Groups (right-closed intervals): 青年 (0, 35], 中年 (35, 59],
    老年 (59, 120].  Adjust the bins/labels here if the scheme changes.
    Mutates and returns ``df``.
    """
    boundaries = [0, 35, 59, 120]
    group_labels = ["青年", "中年", "老年"]
    df["年龄组"] = pd.cut(df["年龄"], bins=boundaries, labels=group_labels, right=True)
    return df

def draw_multi_year_agegroup_networks(df_net, fields_core, control_vars, ch_font, save_path, params):
    """Estimate one network per (year, age-group) cell and print its metrics.

    The age variable itself and the grouping columns ("年份", "年龄组",
    "年龄") are excluded from the model.  Empty cells and cells whose model
    fails are skipped with a message.

    NOTE(review): ``ch_font`` and ``save_path`` are currently unused (the
    function prints metrics but draws nothing) — confirm whether plotting
    was intended here.

    Returns
    -------
    dict mapping (year, age_group) -> networkx.Graph for each cell modeled
    successfully.  (Bug fix: the dict was built but never returned, so
    callers such as plot_happiness_centrality_bar had nothing to consume.)
    """
    years = sorted(df_net["年份"].dropna().unique())
    age_groups = sorted(df_net["年龄组"].dropna().unique())
    results = {}
    excluded = ("年份", "年龄组", "年龄")
    for year in years:
        for age_group in age_groups:
            cell_mask = (df_net["年份"] == year) & (df_net["年龄组"] == age_group)
            df_group = df_net[cell_mask].copy()
            if df_group.empty:
                continue
            # Exclude the age/grouping variables from the model.
            var_names = [v for v in fields_core + control_vars if v not in excluded]
            df_group, var_names, _ = filter_vars(df_group, var_names, params['unique_min'], params['std_min'])
            fields_core_group = [v for v in fields_core if v in var_names and v != "年龄"]
            control_vars_group = [v for v in control_vars if v in var_names and v not in excluded]
            try:
                adj, _ = run_mgm(df_group, fields_core_group, control_vars_group,
                                 alpha=params['mgm_alpha'], max_iter=5000)
                G = build_network_from_adj(adj, var_names, df_group, threshold=params['edge_threshold'])
            except Exception as e:
                print(f"{year}年 年龄组{age_group} 建模失败：{e}")
                continue
            print(f"\n{int(year)}年 年龄组{age_group} 网络结构：")
            print_node_metrics(G)
            results[(year, age_group)] = G
    return results

def plot_density_heatmap(df_summary, save_path="figures/网络密度热力图.png"):
    """Save a heatmap of network density by year (rows) and age group (columns).

    ``df_summary`` must contain the columns "年份", "年龄组" and "网络密度".
    """
    density_table = df_summary.pivot(index="年份", columns="年龄组", values="网络密度")
    plt.figure(figsize=(6, 5))
    sns.heatmap(density_table, annot=True, cmap="YlGnBu", fmt=".2f")
    plt.title("各年份-年龄组网络密度热力图")
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()

def plot_happiness_centrality_bar(df_summary, results, save_path="figures/幸福感中心性条形图.png"):
    """Plot per-year betweenness centrality of the "幸福感" node by age group.

    Parameters
    ----------
    df_summary : unused; kept for backward-compatible interface.
    results : dict mapping (year, age_group) -> Graph, e.g. the return value
        of draw_multi_year_agegroup_networks.
    save_path : output image path.
    """
    records = []
    for (year, age_group), G in results.items():
        if "幸福感" in G.nodes:
            centrality = nx.betweenness_centrality(G)["幸福感"]
            records.append({"年份": int(year), "年龄组": age_group, "中心性": centrality})
    df = pd.DataFrame(records)
    # Robustness: an empty frame has no columns, so the loop below would
    # raise KeyError — skip plotting instead.
    if df.empty:
        print("无幸福感节点数据，跳过绘图。")
        return
    plt.figure(figsize=(8, 5))
    for age_group in df["年龄组"].unique():
        # Sort so each line is drawn in chronological order.
        sub = df[df["年龄组"] == age_group].sort_values("年份")
        plt.plot(sub["年份"], sub["中心性"], marker='o', label=str(age_group))
    plt.xlabel("年份")
    plt.ylabel("幸福感介数中心性")
    plt.title("各年龄组各年份幸福感介数中心性")
    plt.legend()
    # Bug fix: tick positions were hard-coded to 2003/2013/2023; use the
    # years actually present in the data so the plot generalizes.
    plt.xticks(sorted(df["年份"].unique()))
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()

# 批量检验所有稳定出现的边的权重变化
def get_stable_edges(year_networks):
    """Return the edges (as sorted node tuples) present in every year's network.

    ``year_networks`` is a list of (year, Graph) pairs; an edge counts as
    stable when it appears in as many networks as there are years.
    """
    edge_years = {}
    all_years = [int(year) for year, _ in year_networks]
    for year, G in year_networks:
        for u, v in G.edges():
            key = tuple(sorted((u, v)))
            edge_years.setdefault(key, []).append(int(year))
    return [edge for edge, seen in edge_years.items() if len(seen) == len(all_years)]
    
    
def save_and_show_fig(fig, path):
    """Save *fig* to *path* at print resolution, then release its memory."""
    fig.savefig(path, dpi=300, bbox_inches='tight')
    plt.close(fig)
