import os
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from sklearn.covariance import GraphicalLasso, LedoitWolf
from sklearn.preprocessing import StandardScaler
import scipy.stats
from scipy.stats import f_oneway, kruskal
import time
import seaborn as sns

# 这里可以继续补充 mgm 分析相关函数和主流程
def preprocess_data(df, fields_core, control_vars):
    """Select the modelling columns, numerically encode categoricals and impute missing values.

    The "年份" (year) column is reduced to its 4-digit year as a float and is
    never categorical-encoded; all other object/category columns become integer
    codes; numeric columns have NaNs replaced by the column median.
    """
    out = df[fields_core + control_vars].copy()
    # Keep only the 4-digit year from the year column (e.g. "2020年" -> 2020.0).
    if "年份" in out.columns:
        out["年份"] = out["年份"].astype(str).str.extract(r'(\d{4})').astype(float)
    # Encode every non-year categorical/object column as integer codes.
    categorical_cols = [
        col for col in out.columns
        if col != "年份" and (out[col].dtype == 'object' or out[col].dtype.name == 'category')
    ]
    for col in categorical_cols:
        out[col] = pd.Categorical(out[col]).codes
    # Median-impute only the numeric columns.
    numeric_cols = out.select_dtypes(include=[np.number]).columns
    out[numeric_cols] = out[numeric_cols].fillna(out[numeric_cols].median())
    return out

def run_mgm(df, fields_core, control_vars, alpha=0.2, max_iter=1000):
    """Fit a GraphicalLasso on the standardized variables.

    Returns the absolute precision matrix (as the adjacency weights) together
    with the variable order used for its rows/columns.
    """
    variables = fields_core + control_vars
    # Standardize so the lasso penalty treats all variables on the same scale.
    matrix = StandardScaler().fit_transform(df[variables].values)
    estimator = GraphicalLasso(alpha=alpha, max_iter=max_iter)
    estimator.fit(matrix)
    return np.abs(estimator.precision_), variables

def run_ledoitwolf(df, fields_core, control_vars):
    """Fit a Ledoit-Wolf shrinkage estimator on the raw (unstandardized) data.

    Returns the absolute shrunk covariance matrix and the variable order.
    """
    variables = fields_core + control_vars
    estimator = LedoitWolf()
    estimator.fit(df[variables].values)
    return np.abs(estimator.covariance_), variables

def get_chinese_name(var):
    """Return the Chinese label for a variable.

    Lookup order: core-variable map first, then control-variable map,
    falling back to the raw variable name.
    """
    if var in CORE_NAME_MAP:
        return CORE_NAME_MAP[var]
    return CONTROL_NAME_MAP.get(var, var)

def build_network_from_adj(adj, var_names, df, threshold=0.10):
    """Build an undirected graph from an adjacency (weight) matrix.

    An edge is kept only when |adj[i, j]| exceeds `threshold` AND the Pearson
    correlation between the two raw columns is significant (p < 0.05).
    Nodes labelled "区县" take part in modelling but are excluded from the graph.
    """
    graph = nx.Graph()
    labels = {v: CORE_NAME_MAP.get(v, CONTROL_NAME_MAP.get(v, v)) for v in var_names}
    for var in var_names:
        if labels[var] != "区县":  # district nodes are never drawn
            graph.add_node(labels[var])
    count = len(var_names)
    for i in range(count):
        for j in range(i + 1, count):
            vi, vj = var_names[i], var_names[j]
            corr, pval = scipy.stats.pearsonr(df[vi], df[vj])
            weight = adj[i, j]
            # Skip any edge touching a district node.
            if labels[vi] == "区县" or labels[vj] == "区县":
                continue
            if abs(weight) > threshold and pval < 0.05:
                graph.add_edge(labels[vi], labels[vj], weight=weight, pval=pval)
    return graph

def get_network_annotation(params):
    """Compose the multi-line Chinese annotation describing the plot encoding.

    `params` must provide 'edge_threshold', 'mgm_alpha', 'unique_min' and 'std_min'.
    """
    lines = [
        "节点大小：介数中心性",
        "红线：正相关，蓝线：负相关",
        f"仅显示统计显著（p<0.05），边权重阈值：{params['edge_threshold']}",
        f"模型正则化参数 alpha：{params['mgm_alpha']}",
        f"剔除唯一值≤{params['unique_min']}、方差<{params['std_min']}的变量",
        "区县仅参与建模，不在图中显示",
    ]
    return "\n".join(lines)

def filter_vars(df, var_names, unique_min, std_min):
    """Drop variables that carry too little information.

    A column is dropped when it has at most `unique_min` distinct values, or
    when it is numeric with standard deviation below `std_min`.
    Returns (filtered df, surviving var_names, dropped column names).
    """
    uniques = df.nunique()
    # Standard deviation is only meaningful for numeric columns.
    numeric_std = df.select_dtypes(include=[np.number]).std()
    to_drop = set(uniques.index[uniques <= unique_min]) | set(numeric_std.index[numeric_std < std_min])
    drop_vars = list(to_drop)
    filtered = df.drop(columns=drop_vars)
    kept = [v for v in var_names if v not in to_drop]
    return filtered, kept, drop_vars

def get_node_metrics(G):
    """Compute per-node centrality metrics for graph G.

    Returns a dict with four entries — "degree", "betweenness", "eigenvector"
    and "closeness" — each mapping node -> metric value.
    """
    metrics = {"degree": dict(G.degree())}
    metrics["betweenness"] = nx.betweenness_centrality(G)
    metrics["eigenvector"] = nx.eigenvector_centrality(G)
    metrics["closeness"] = nx.closeness_centrality(G)
    return metrics

def print_node_metrics(G):
    """Print a tab-separated table of node centrality metrics for graph G."""
    stats = get_node_metrics(G)
    print("\n节点指标：")
    print("变量\t度数\t介数中心性\t特征向量中心性\t接近中心性")
    for node in G.nodes():
        row = (
            f"{node}\t{stats['degree'][node]}"
            f"\t{stats['betweenness'][node]:.3f}"
            f"\t{stats['eigenvector'][node]:.3f}"
            f"\t{stats['closeness'][node]:.3f}"
        )
        print(row)

def draw_network(G, title, save_path, ch_font, params, node_color='skyblue'):
    """Render graph G as a standalone figure and save it to `save_path`.

    Edge width scales with |weight| (minimum 2); red edges are positive
    weights, blue are negative. Node size scales with betweenness centrality.
    A gray annotation describing the parameters is placed in the bottom-left.
    """
    layout = nx.spring_layout(G, seed=42)  # fixed seed keeps layouts reproducible
    edge_data = list(G.edges(data=True))
    widths = [max(abs(attrs['weight']) * 20, 2) for _, _, attrs in edge_data]
    colors = ['red' if attrs['weight'] > 0 else 'blue' for _, _, attrs in edge_data]
    betweenness = nx.betweenness_centrality(G)
    node_sizes = [betweenness[node] * 2000 + 800 for node in G.nodes()]
    fig = plt.figure(figsize=(8, 6))
    nx.draw_networkx_nodes(G, layout, node_color=node_color, node_size=node_sizes,
                           edgecolors='white', linewidths=2)
    nx.draw_networkx_labels(G, layout, font_size=6, font_family=ch_font.get_name())
    nx.draw_networkx_edges(G, layout, width=widths, edge_color=colors, style='solid')
    weight_labels = {(u, v): f"{attrs['weight']:.2f}" for u, v, attrs in edge_data}
    nx.draw_networkx_edge_labels(G, layout, edge_labels=weight_labels, font_size=4)
    plt.title(title, fontproperties=ch_font, fontsize=14)
    plt.axis('off')
    plt.text(0.01, 0.01, get_network_annotation(params), transform=fig.transFigure,
             fontsize=10, color='gray', verticalalignment='bottom', fontproperties=ch_font)
    fig.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close(fig)

def draw_network_subplot(G, ax, ch_font, params, node_color='skyblue', title=None):
    """Render graph G into an existing matplotlib Axes (for multi-panel figures).

    Uses the same visual encoding as draw_network: edge width ∝ |weight|,
    red/blue for positive/negative weights, node size ∝ betweenness centrality.
    """
    layout = nx.spring_layout(G, seed=42)  # fixed seed keeps layouts reproducible
    edge_data = list(G.edges(data=True))
    widths = [max(abs(attrs['weight']) * 20, 2) for _, _, attrs in edge_data]
    colors = ['red' if attrs['weight'] > 0 else 'blue' for _, _, attrs in edge_data]
    betweenness = nx.betweenness_centrality(G)
    node_sizes = [betweenness[node] * 2000 + 800 for node in G.nodes()]
    nx.draw_networkx_nodes(G, layout, ax=ax, node_color=node_color, node_size=node_sizes,
                           edgecolors='white', linewidths=2)
    nx.draw_networkx_labels(G, layout, ax=ax, font_size=6, font_family=ch_font.get_name())
    nx.draw_networkx_edges(G, layout, ax=ax, width=widths, edge_color=colors, style='solid')
    weight_labels = {(u, v): f"{attrs['weight']:.2f}" for u, v, attrs in edge_data}
    nx.draw_networkx_edge_labels(G, layout, edge_labels=weight_labels, ax=ax, font_size=4)
    if title:
        ax.set_title(title, fontproperties=ch_font, fontsize=14)
    ax.axis('off')

def draw_multi_year_networks(df_net, fields_core, control_vars, ch_font, save_path, params):
    """Fit one network per year, draw them side by side, save the figure and print metrics.

    Parameters
    ----------
    df_net : DataFrame containing a "年份" (year) column plus all modelling variables.
    fields_core, control_vars : lists of core / control variable names.
    ch_font : FontProperties used for Chinese labels.
    save_path : output path for the combined figure.
    params : dict with keys 'mgm_alpha', 'edge_threshold', 'unique_min', 'std_min'.

    Returns
    -------
    (node_metrics_all, year_networks)
        node_metrics_all : list of (year, metrics-dict) pairs.
        year_networks : list of (year, networkx Graph) pairs.
    """
    years = sorted(df_net["年份"].dropna().unique())
    fig, axes = plt.subplots(1, len(years), figsize=(8 * len(years), 6))
    if len(years) == 1:
        axes = [axes]  # plt.subplots returns a bare Axes (not an array) for one panel
    node_metrics_all = []
    # The year column only splits the panels; it is excluded from every model.
    control_vars_no_year_quxian = [v for v in control_vars if v != "年份"]
    year_networks = []  # (year, graph) pairs for the structural comparisons below
    for idx, year in enumerate(years):
        df_year = df_net[df_net["年份"] == year].copy()
        var_names = fields_core + [v for v in control_vars_no_year_quxian if v not in fields_core]
        df_year, var_names, _ = filter_vars(df_year, var_names, params['unique_min'], params['std_min'])
        # Keep the running variable lists consistent with what filtering kept;
        # NOTE(review): this narrows the variable set cumulatively across years.
        fields_core = [v for v in fields_core if v in var_names]
        control_vars_no_year_quxian = [v for v in control_vars_no_year_quxian if v in var_names]
        try:
            adj, _ = run_mgm(df_year, fields_core, control_vars_no_year_quxian,
                             alpha=params['mgm_alpha'], max_iter=5000)
            G = build_network_from_adj(adj, var_names, df_year, threshold=params['edge_threshold'])
        except Exception as e:
            # Best-effort: a year that fails to fit leaves its panel empty instead of aborting.
            print(f"{year}年数据建模失败：{e}")
            continue
        draw_network_subplot(G, axes[idx], ch_font, params, node_color='skyblue', title=f"{int(year)}年")
        node_metrics_all.append((year, get_node_metrics(G)))
        year_networks.append((year, G))
    # Shared annotation for all panels.
    annotation = get_network_annotation(params)
    fig.text(0.01, 0.01, annotation,
             fontsize=10, color='gray', verticalalignment='bottom', fontproperties=ch_font)
    plt.tight_layout(rect=[0, 0.03, 1, 1])
    fig.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close(fig)
    for year, metrics in node_metrics_all:
        print(f"\n{year}年节点指标：")
        # Bug fix: pd.DataFrame(metrics, columns=[...]) produced all-NaN columns
        # because the dict keys ("degree", ...) did not match the Chinese column
        # names. Build one explicit row per node instead.
        rows = [
            (node,
             metrics["degree"][node],
             metrics["betweenness"][node],
             metrics["eigenvector"][node],
             metrics["closeness"][node])
            for node in metrics["degree"]
        ]
        df_metrics = pd.DataFrame(rows, columns=["变量", "度数", "介数中心性", "特征向量中心性", "接近中心性"])
        print(df_metrics.to_string(index=False))

    # Compare network structure and edge changes across years.
    compare_network_structure(year_networks)
    compare_edge_changes(year_networks)
    return node_metrics_all, year_networks

