from typing import Tuple, Optional
from MDAnalysis import Universe
from MDAnalysis.analysis.rdf import InterRDF
import warnings
import matplotlib.pyplot as plt
import networkx as nx
from collections import Counter
import itertools
import numpy as np
import random
import os
import networkx as nx
import time
import pickle
import graph_tool as gt
import graph_tool.topology as topology

import GraphTools.GEBC_break as Gb
import GraphTools.basic_method as bm


def edge_distance(G, e1, e2, add_one=True):
    """Return the topological distance between two edges of G.

    The distance is the smallest of the four endpoint-to-endpoint
    shortest-path node distances; when ``add_one`` is True (default),
    one is added so that adjacent edges are at distance 1.
    """
    a, b = e1
    c, d = e2
    # Shortest node distance over all four endpoint pairings.
    closest = min(
        nx.shortest_path_length(G, a, c),
        nx.shortest_path_length(G, a, d),
        nx.shortest_path_length(G, b, c),
        nx.shortest_path_length(G, b, d),
    )
    return closest + 1 if add_one else closest


# ---------- 2) 生成 代际平均密度 的核心的三个函数 ----------

def average_generational_space(G, max_depth=None, sample_size=50, seed=None):
    """Estimate the mean generational space of G's edges.

    Returns an array ``S_bar`` where ``S_bar[k-1]`` is the average number
    of edges at line-graph distance ``k`` from a sampled source edge.

    If ``max_depth`` is None, an unbounded BFS is run from every sampled
    edge and the array length is the largest distance observed; otherwise
    the BFS is cut off at ``max_depth`` and the array has that length.
    """
    rng = random.Random(seed)
    line_g = nx.line_graph(G)

    edge_pool = list(G.edges())
    take_all = (sample_size is None) or (sample_size >= len(edge_pool))
    sources = edge_pool if take_all else rng.sample(edge_pool, sample_size)

    # Accumulate distance counts from every sampled source edge;
    # self-distance 0 is always excluded.
    tally = Counter()
    if max_depth is not None:
        for src in sources:
            dists = nx.single_source_shortest_path_length(
                line_g, src, cutoff=max_depth
            )
            tally.update(d for d in dists.values() if d > 0)
        depth = max_depth
    else:
        for src in sources:
            dists = nx.single_source_shortest_path_length(line_g, src)
            tally.update(d for d in dists.values() if d > 0)
        # The observed maximum distance fixes the array length.
        depth = max(tally) if tally else 0

    accum = np.zeros(depth, dtype=float)
    for k, count in tally.items():
        accum[k - 1] = count

    return accum / len(sources)


def ave_generation_density(G, marked_edges, n_random, max_k=None, seed=None):
    """
    Random baseline pair density from the generational-space estimate.

    Parameters
    ----------
    G : networkx.Graph
    marked_edges : list
        Edges marked (e.g. broken bonds).
    n_random : int
        Number of source edges sampled for the generational estimate.
    max_k : int, optional
        Maximum generation depth (None -> determined by the sample).
    seed : int, optional
        Seed for reproducible sampling.

    Returns
    -------
    numpy.ndarray of length max_k with the average pair density per
    generation.
    """
    n_marked = len(marked_edges)
    if n_marked < 2:
        raise ValueError(
            "At least two marked edges are required for space generation.")

    # Generational space S_k estimated by edge sampling.
    generations = average_generational_space(
        G, max_depth=max_k, sample_size=n_random, seed=seed)

    # Baseline density of marked edges, halved because unordered pairs
    # are counted once.
    baseline = n_marked / G.number_of_edges() / 2
    return generations * baseline


# ---------- 3) 计算拓扑对关联函数 ----------

def count_pair_num(G, edges, max_depth=None, mass_weight_flag=None):
    """
    Count the pair-distance distribution between the given edges,
    optionally weighted by an edge "mass" attribute.

    Parameters
    ----------
    G : networkx.Graph
        Input graph.
    edges : list
        List of (u, v) edge tuples to pair up.
    max_depth : int, optional
        Maximum edge distance to keep; more distant pairs are skipped.
        If None, the largest observed distance sets the array length.
    mass_weight_flag : str, optional
        Edge-attribute name used as the weight; if None, each pair
        contributes 1.

    Returns
    -------
    numpy.ndarray
        (Weighted) pair counts indexed from k = 1, i.e. element 0 holds
        the count for distance 1.
    """
    # Collect per-edge weights (stored under both orientations so lookup
    # by either (u, v) or (v, u) works).
    edge_weights = {}
    if mass_weight_flag:
        for u, v in edges:
            weight = G[u][v].get(mass_weight_flag, 1.0)
            edge_weights[(u, v)] = weight
            edge_weights[(v, u)] = weight
    else:
        for u, v in edges:
            edge_weights[(u, v)] = 1.0
            edge_weights[(v, u)] = 1.0

    # Accumulate the distance histogram over all unordered pairs.
    cnt = Counter()

    for e1, e2 in itertools.combinations(edges, 2):
        k = edge_distance(G, e1, e2)
        if max_depth is not None and k > max_depth:
            continue

        # NOTE(review): the original comment said the *product* of the two
        # edge masses should be used, but only e1's weight is added here —
        # confirm whether `edge_weights[e1] * edge_weights[e2]` was intended.
        cnt[k] += edge_weights[e1]

    # Convert the Counter into a dense array.
    if max_depth is None:
        max_depth = max(cnt.keys()) if cnt else 0

    cnt_array = np.zeros(max_depth + 1)
    for k, v in cnt.items():
        if k <= max_depth:
            cnt_array[k] = v

    return cnt_array[1:]  # start at k=1 (index 0 of the result)


def __topological_pair_corr_old1(G, marked_edges, n_random=50):
    """Legacy topological pair-correlation (kept for reference)."""
    n_marked = len(marked_edges)
    if n_marked < 2:
        raise ValueError("至少需要两条被标记的边")

    # A first pass over the marked pairs fixes the maximum depth max_k.
    pair_hist = count_pair_num(G, marked_edges)
    g_real = pair_hist / n_marked
    max_k = len(g_real)

    baseline = ave_generation_density(
        G, marked_edges, max_k=max_k, n_random=n_random)

    # Normalise: observed density relative to the random baseline.
    with np.errstate(divide='ignore', invalid='ignore'):
        g_rel = np.where(baseline > 0, g_real / baseline, 0)

    return np.arange(1, max_k + 1), g_rel


def __topological_pair_corr_old(G, marked_edges, n_random=50):
    """Legacy variant that inlines the baseline computation."""
    n_marked = len(marked_edges)
    if n_marked < 2:
        raise ValueError("至少需要两条被标记的边")

    # The observed histogram fixes the maximum depth.
    pair_hist = count_pair_num(G, marked_edges)
    max_k = len(pair_hist)

    # Generational space from randomly sampled source edges.
    S_k = average_generational_space(
        G, max_depth=max_k, sample_size=n_random)

    # Pair-density baseline, halved because pairs are unordered.
    rho0 = n_marked / G.number_of_edges() / 2
    g_ave = S_k * rho0

    # Normalise against the baseline where it is non-zero.
    with np.errstate(divide='ignore', invalid='ignore'):
        g_rel = np.where(g_ave > 0, pair_hist / (n_marked * S_k * rho0), 0)

    return np.arange(1, max_k + 1), g_rel


def topological_pair_corr(G, marked_edges, max_depth=None, sample_size=50,
                          seed=None, mass_weight_flag=None, n_random=None):
    """
    Mass-weighted topological pair-correlation function g_rel^(m)(k).

    Parameters
    ----------
    G : networkx.Graph
        Input graph.
    marked_edges : list
        Marked edges (e.g. broken bonds) whose pair correlation is measured.
    max_depth : int, optional
        Maximum edge distance considered.
    sample_size : int, optional
        Number of source edges sampled for the random baseline.
    seed : int, optional
        Random seed for the baseline sampling.
    mass_weight_flag : str, optional
        Edge attribute used as mass weight; None -> unweighted counts.
    n_random : int, optional
        Backward-compatible alias for ``sample_size`` — callers in this
        module still use the old keyword ``n_random=50``, which previously
        raised TypeError.  When given, it overrides ``sample_size``.

    Returns
    -------
    ks : numpy.ndarray
        Distances k = 1..K.
    g_rel : numpy.ndarray
        Relative correlation values g_rel^(m)(k).

    Raises
    ------
    ValueError
        If fewer than two marked edges are supplied.
    """
    # BUGFIX: honor the legacy keyword used by existing call sites.
    if n_random is not None:
        sample_size = n_random

    M = len(marked_edges)
    if M < 2:
        raise ValueError("至少需要两条被标记的边")

    # 1. (Mass-weighted) pair-distance histogram N^(m)(k).
    cnt = count_pair_num(G, marked_edges, max_depth, mass_weight_flag)

    # 2. Spatially averaged baseline S_bar(k), matched in length to cnt.
    S_bar = average_generational_space(
        G, max_depth=len(cnt), sample_size=sample_size, seed=seed)

    # 3. Density rho0^(m): mass density when weighted, number density otherwise.
    if mass_weight_flag:
        # Total broken mass M* over the marked edges.
        total_mass = sum(G[u][v].get(mass_weight_flag, 0)
                         for u, v in marked_edges)
        # rho0^(m) = M* / (2|E|)
        rho0_m = total_mass / (2 * G.number_of_edges())
    else:
        rho0_m = M / (2 * G.number_of_edges())

    # 4. Normalise the histogram by the baseline pair density.
    with np.errstate(divide='ignore', invalid='ignore'):
        g_rel = np.where(S_bar > 0, cnt / (M * rho0_m * S_bar), 0)

    ks = np.arange(1, len(cnt) + 1)
    return ks, g_rel


# 2025-05-11 可视化的GIF图


def create_gif_for_topo(all_data: dict, output_dir: str, fps: int = 10,
                        y_range=(0, 2)) -> None:
    """
    Render per-time-step correlation plots and a combined GIF
    (topological space).

    Parameters
    ----------
    all_data : dict
        Must contain 'times' (list of time steps) and 'correlation_data'
        (list of dicts with 'ks' and 'g_rel' arrays) of equal length.
    output_dir : str
        Directory for the GIF; per-frame PNGs go into ``output_dir/pics``.
    fps : int, optional
        Frames per second of the GIF (default 10).
    y_range : tuple, optional
        Fixed (y_min, y_max) for every plot.  BUGFIX: this used to be a
        mutable default argument ``[0, 2]``; a tuple avoids cross-call
        shared state.

    Raises
    ------
    RuntimeError
        If 'times'/'correlation_data' are missing or differ in length.
    """
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.animation import FuncAnimation

    os.makedirs(output_dir, exist_ok=True)
    pics_dir = os.path.join(output_dir, 'pics')
    os.makedirs(pics_dir, exist_ok=True)

    times = all_data.get('times', [])
    correlation_data = all_data.get('correlation_data', [])
    if not times or not correlation_data:
        raise RuntimeError("all_data 必须包含 'times' 和 'correlation_data'")

    if len(times) != len(correlation_data):
        raise RuntimeError("'times' 和 'correlation_data' 长度不一致")

    # Global axis limits: x from the data, y fixed by the caller.
    all_ks = np.hstack([entry['ks'] for entry in correlation_data])
    x_min, x_max = all_ks.min(), all_ks.max()
    y_min, y_max = y_range

    # One static PNG per time step.
    for t, entry in zip(times, correlation_data):
        ks, g_rel = entry['ks'], entry['g_rel']
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.plot(ks, g_rel, 'b-', lw=2)
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(y_min, y_max)
        ax.set_title(f"Topological Pair Correlation Function (t={t})")
        ax.set_xlabel('Distance k')
        ax.set_ylabel('Relative Correlation Function g_rel(k)')
        ax.grid(True)
        fname = f"correlation_t{t}.png"
        plt.savefig(os.path.join(pics_dir, fname))
        plt.close()

    # Animated GIF over all time steps.
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)
    ax.set_xlabel('Distance k')
    ax.set_ylabel('Relative Correlation Function g_rel(k)')
    ax.grid(True)
    line, = ax.plot([], [], 'b-', lw=2)

    def init():
        line.set_data([], [])
        return (line,)

    def animate(i):
        t = times[i]
        ks = correlation_data[i]['ks']
        g_rel = correlation_data[i]['g_rel']
        line.set_data(ks, g_rel)
        ax.set_title(f"Topological Pair Correlation Function (t={t})")
        return (line,)

    anim = FuncAnimation(
        fig, animate, init_func=init,
        frames=len(times),
        interval=1000 // fps, blit=True
    )

    gif_path = os.path.join(output_dir, 'correlation_in_topo_space.gif')
    anim.save(gif_path, writer='pillow', fps=fps)
    plt.close(fig)
    print(f"已生成 GIF: {gif_path}")


def create_gif_for_real(all_data: dict, output_dir: str, fps: int = 10,
                        y_range=None) -> None:
    """
    Render per-time-step g(r) plots and a combined GIF (real space).

    Parameters
    ----------
    all_data : dict
        Must contain 'times' (list of time steps) and 'correlation_data'
        (list of dicts with 'ks' and 'g_r' arrays) of equal length.
    output_dir : str
        Directory for the GIF; per-frame PNGs go into ``output_dir/pics``.
    fps : int, optional
        Frames per second of the GIF (default 10).
    y_range : tuple, optional
        Fixed (y_min, y_max) for the plots.  BUGFIX: this parameter was
        previously accepted but ignored (callers passing ``y_range=(0, 2)``
        silently got data-derived limits); it is now honored when given
        and falls back to the data range when None.

    Raises
    ------
    RuntimeError
        If 'times'/'correlation_data' are missing or differ in length.
    """
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.animation import FuncAnimation

    os.makedirs(output_dir, exist_ok=True)
    pics_dir = os.path.join(output_dir, 'pics')
    os.makedirs(pics_dir, exist_ok=True)

    times = all_data.get('times', [])
    correlation_data = all_data.get('correlation_data', [])
    if not times or not correlation_data:
        raise RuntimeError("all_data 必须包含 'times' 和 'correlation_data'")

    if len(times) != len(correlation_data):
        raise RuntimeError("'times' 和 'correlation_data' 长度不一致")

    # Global axis limits.
    all_ks = np.hstack([entry['ks'] for entry in correlation_data])
    all_g = np.hstack([entry['g_r'] for entry in correlation_data])
    x_min, x_max = all_ks.min(), all_ks.max()
    if y_range is not None:
        y_min, y_max = y_range
    else:
        y_min, y_max = all_g.min(), all_g.max()

    # One static PNG per time step.
    for t, entry in zip(times, correlation_data):
        ks, g_r = entry['ks'], entry['g_r']
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.plot(ks, g_r, 'b-', lw=2)
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(y_min, y_max)
        ax.set_title(f"Topological Pair Correlation Function (t={t})")
        ax.set_xlabel('Distance k')
        ax.set_ylabel('Relative Correlation Function g_r(k)')
        ax.grid(True)
        fname = f"correlation_t{t}.png"
        plt.savefig(os.path.join(pics_dir, fname))
        plt.close()

    # Animated GIF over all time steps.
    fig, ax = plt.subplots(figsize=(8, 6))
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(y_min, y_max)
    ax.set_xlabel('Distance k')
    ax.set_ylabel('Relative Correlation Function g_r(k)')
    ax.grid(True)
    line, = ax.plot([], [], 'b-', lw=2)

    def init():
        line.set_data([], [])
        return (line,)

    def animate(i):
        t = times[i]
        ks = correlation_data[i]['ks']
        g_r = correlation_data[i]['g_r']
        line.set_data(ks, g_r)
        ax.set_title(f"Topological Pair Correlation Function (t={t})")
        return (line,)

    anim = FuncAnimation(
        fig, animate, init_func=init,
        frames=len(times),
        interval=1000 // fps, blit=True
    )

    gif_path = os.path.join(output_dir, 'correlation_in_real_space.gif')
    anim.save(gif_path, writer='pillow', fps=fps)
    plt.close(fig)
    print(f"已生成 GIF: {gif_path}")

# 2025-05-11 汇总的重要的函数


def graph_frame_to_corr_func_topo(sub_mission_path):
    """
    Compute the topological pair-correlation function of "missing"
    (broken) edges for a series of graph frames and render a GIF.

    Workflow
    --------
    1. Create ``analysis_topology/`` under ``sub_mission_path``.
    2. Load ``frame_series.pkl`` — a dict keyed by time step (int) whose
       values contain ``{'graph': MultiGraph}``.
    3. Take the earliest frame as the reference graph/edge set.
    4. Sample N = 50 equally spaced time steps (first and last included).
    5. For each sampled step, collect the edges missing w.r.t. the
       initial graph; skip frames with <= 2 missing edges, otherwise run
       ``topological_pair_corr`` on them and record ks / g_rel / ranges.
    6. Pickle the results to ``analysis_topology/all_correlation_data.pkl``.
    7. Render per-frame plots and a GIF via ``create_gif_for_topo``.

    Parameters
    ----------
    sub_mission_path : str
        Directory containing ``frame_series.pkl``.
    """
    # Output directory for intermediate data and visualisations.
    analysis_topology_dir = os.path.join(sub_mission_path, 'analysis_topology')
    os.makedirs(analysis_topology_dir, exist_ok=True)

    pkl_file = os.path.join(sub_mission_path, 'frame_series.pkl')
    with open(pkl_file, 'rb') as f:
        frame = pickle.load(f)
    print(f"已加载文件: {pkl_file}")

    # frame: dict keyed by time step (int), values {'graph': MultiGraph}.

    # 1. Reference graph: the earliest frame.
    initial_graph = frame[min(frame.keys())]['graph']
    initial_edges = set(initial_graph.edges())

    # 2. N equally spaced sample times (first and last included).
    N = 50
    all_times = sorted(frame.keys())
    idxs = np.linspace(0, len(all_times) - 1, N, dtype=int)
    sample_times = [all_times[i] for i in idxs]
    saved_sample_times = []

    # 3. Missing edges and correlation data per sampled time.
    missing_edges_dict = {}
    correlation_data_list = []

    for t in sample_times:
        g = frame[t]['graph']

        current_edges = set(g.edges())
        missing = initial_edges - current_edges
        missing_edges_dict[t] = missing

        # Too few broken bonds -> correlation is not meaningful; skip.
        if len(missing) <= 2:
            print(f"Time {t}: 缺失边数 = {len(missing)} (<=2),跳过此帧")
            continue
        saved_sample_times.append(t)
        print(f"Time {t}: 开始计算拓扑对关联函数,当前断裂键数 = {len(missing)}")

        start_time = time.time()

        # All missing edges are used; bm.sample_missing could subsample
        # here for very large sets.
        missing_sampled = list(missing)

        # BUGFIX: topological_pair_corr takes ``sample_size`` — the old
        # ``n_random=50`` keyword raised TypeError against its signature.
        ks, g_rel = topological_pair_corr(
            initial_graph, missing_sampled, sample_size=50)

        # Axis ranges for downstream plotting.
        x_range = (min(ks), max(ks))
        y_range = (min(g_rel), max(g_rel))

        correlation_data_list.append({
            'ks': ks,
            'g_rel': g_rel,
            'x_range': x_range,
            'y_range': y_range
        })

        duration = time.time() - start_time
        print(f"Time {t}: topological_pair_corr 耗时 {duration:.4f} 秒")

    # Bundle and pickle everything in one file.
    all_data = {
        'times': saved_sample_times,
        'correlation_data': correlation_data_list
    }

    all_data_path = os.path.join(
        analysis_topology_dir, 'all_correlation_data.pkl')
    with open(all_data_path, 'wb') as f:
        pickle.dump(all_data, f)

    print(f"已保存所有数据到: {all_data_path}")

    # -------- GIF ----------
    create_gif_for_topo(
        all_data=all_data,
        output_dir=analysis_topology_dir,
        fps=10
    )


def graph_frame_to_corr_func_overtime(sub_mission_path):
    """
    Variant of ``graph_frame_to_corr_func_topo`` intended to recompute the
    correlation on the *current* frame's topology (bonds break under the
    constraints of the network as it exists at that moment).

    NOTE(review): the per-frame-topology part was never implemented — the
    body still measures distances on the initial graph, exactly like
    ``graph_frame_to_corr_func_topo``.  Kept as-is apart from two fixes:

    * the final call used the undefined name ``create_gif`` (NameError);
      it now calls ``create_gif_for_topo``;
    * ``topological_pair_corr`` is called with ``sample_size`` (the old
      ``n_random=50`` keyword raised TypeError).

    Parameters
    ----------
    sub_mission_path : str
        Directory containing ``frame_series.pkl``.
    """
    # Output directory for intermediate data and visualisations.
    analysis_topology_dir = os.path.join(sub_mission_path, 'analysis_topology')
    os.makedirs(analysis_topology_dir, exist_ok=True)

    pkl_file = os.path.join(sub_mission_path, 'frame_series.pkl')
    with open(pkl_file, 'rb') as f:
        frame = pickle.load(f)
    print(f"已加载文件: {pkl_file}")

    # frame: dict keyed by time step (int), values {'graph': MultiGraph}.

    # 1. Reference graph: the earliest frame.
    initial_graph = frame[min(frame.keys())]['graph']
    initial_edges = set(initial_graph.edges())

    # 2. N equally spaced sample times (first and last included).
    N = 50
    all_times = sorted(frame.keys())
    idxs = np.linspace(0, len(all_times) - 1, N, dtype=int)
    sample_times = [all_times[i] for i in idxs]
    saved_sample_times = []

    # 3. Missing edges and correlation data per sampled time.
    missing_edges_dict = {}
    correlation_data_list = []

    for t in sample_times:
        g = frame[t]['graph']

        current_edges = set(g.edges())
        missing = initial_edges - current_edges
        missing_edges_dict[t] = missing

        # Too few broken bonds -> correlation is not meaningful; skip.
        if len(missing) <= 2:
            print(f"Time {t}: 缺失边数 = {len(missing)} (<=2),跳过此帧")
            continue
        saved_sample_times.append(t)
        print(f"Time {t}: 开始计算拓扑对关联函数,当前断裂键数 = {len(missing)}")

        start_time = time.time()

        # All missing edges are used; bm.sample_missing could subsample
        # here for very large sets.
        missing_sampled = list(missing)

        # BUGFIX: use ``sample_size`` instead of the invalid ``n_random``.
        ks, g_rel = topological_pair_corr(
            initial_graph, missing_sampled, sample_size=50)

        # Axis ranges for downstream plotting.
        x_range = (min(ks), max(ks))
        y_range = (min(g_rel), max(g_rel))

        correlation_data_list.append({
            'ks': ks,
            'g_rel': g_rel,
            'x_range': x_range,
            'y_range': y_range
        })

        duration = time.time() - start_time
        print(f"Time {t}: topological_pair_corr 耗时 {duration:.4f} 秒")

    # Bundle and pickle everything in one file.
    all_data = {
        'times': saved_sample_times,
        'correlation_data': correlation_data_list
    }

    all_data_path = os.path.join(
        analysis_topology_dir, 'all_correlation_data.pkl')
    with open(all_data_path, 'wb') as f:
        pickle.dump(all_data, f)

    print(f"已保存所有数据到: {all_data_path}")

    # -------- GIF ----------
    # BUGFIX: the original called the undefined name ``create_gif``.
    create_gif_for_topo(
        all_data=all_data,
        output_dir=analysis_topology_dir,
        fps=10
    )

# 2025-05-11 实空间中的对关联函数


def corr_func_real(
    positions: np.ndarray,
    box_length: np.ndarray,
    nbins: int = 100,
    r_max: Optional[float] = None
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Radial distribution function g(r) of a point set under periodic
    boundary conditions.

    Parameters
    ----------
    positions : (N, 3) np.ndarray
        Absolute coordinates relative to the box origin (0, 0, 0).
    box_length : (3,) np.ndarray
        Periodic box edge lengths [Lx, Ly, Lz].
    nbins : int, optional
        Number of distance bins (default 100).
    r_max : float, optional
        Largest distance to histogram; defaults to half the shortest
        box edge.

    Returns
    -------
    r_bins : (nbins,) np.ndarray
        Bin-centre distances.
    g_r : (nbins,) np.ndarray
        g(r) values per bin.
    """
    n_points = positions.shape[0]
    if n_points < 2:
        raise ValueError("至少需要 2 个点来计算径向分布函数")

    # Default cutoff: half of the shortest box edge (minimum-image safe).
    cutoff = 0.5 * box_length.min() if r_max is None else r_max

    # Minimal MDAnalysis Universe carrying only coordinates and the box.
    universe = Universe.empty(n_atoms=n_points, trajectory=True)
    universe.atoms.positions = positions
    # dimensions = [Lx, Ly, Lz, alpha, beta, gamma]
    universe.dimensions = np.array([*box_length, 90.0, 90.0, 90.0])

    # RDF of the set against itself, excluding self-pairing.
    rdf = InterRDF(
        universe.atoms,
        universe.atoms,
        nbins=nbins,
        range=(0.0, cutoff),
        exclusion_block=(1, 1)
    )
    rdf.run()

    return rdf.bins, rdf.rdf.copy()


def graph_frame_to_corr_func_real(sub_mission_path, N_samples=50):
    """
    2025-05-11

    Compute the real-space radial correlation function g(r) of "missing"
    (broken) edges across a series of graph frames.

    Parameters
    ----------
    sub_mission_path : str
        Directory containing frame_series.pkl.
    N_samples : int, optional
        Number of equally spaced frames to sample, default 50.

    Returns
    -------
    all_data : dict
        {
          'times': List[int],           # time steps actually processed
          'correlation_data': List[dict]# per-step {'ks','g_r','x_range','y_range'}
        }
    """
    # 1. Output directory
    analysis_dir = os.path.join(sub_mission_path, 'analysis_real_space')
    os.makedirs(analysis_dir, exist_ok=True)

    # 2. Load the frame series
    with open(os.path.join(sub_mission_path, 'frame_series.pkl'), 'rb') as f:
        frame = pickle.load(f)
    print(f"Loaded frame_series.pkl with {len(frame)} frames")

    # 3. Reference graph and its edge set (earliest time step)
    t0 = min(frame.keys())
    initial_graph = frame[t0]['graph']
    initial_edges = set(initial_graph.edges())

    # 4. Equally spaced sample times (first and last included)
    all_times = sorted(frame.keys())
    idxs = np.linspace(0, len(all_times)-1, N_samples, dtype=int)
    sample_times = [all_times[i] for i in idxs]

    saved_times = []
    corr_data = []

    # 5. Per-frame computation
    for t in sample_times:
        G = frame[t]['graph']
        current_edges = set(G.edges())
        missing = list(initial_edges - current_edges)

        # Too few broken bonds -> skip this frame
        if len(missing) <= 2:
            print(f"Time {t}: 缺失边 ≤2,跳过")
            continue

        saved_times.append(t)
        print(f"Time {t}: 计算真实空间关联函数,缺失边数 = {len(missing)}")

        # 5.1 Midpoint of each missing edge under PBC.
        #     NOTE(review): the box is always taken from the *initial*
        #     graph's 'box' attribute — confirm the box does not change
        #     over the trajectory.

        box = bm.get_box_dimensions(initial_graph.graph['box'])
        # presumably edge lengths Lx, Ly, Lz — verify bm.get_box_dimensions

        position_key = bm.detect_position_key(initial_graph)
        mids = []
        for u, v in missing:
            pos_u = np.array(G.nodes[u][position_key])
            pos_v = np.array(G.nodes[v][position_key])
            delta = pos_v - pos_u

            # Minimum-image convention for the displacement
            delta -= np.round(delta / box) * box
            mid = pos_u + 0.5 * delta
            mids.append(mid)

        positions = np.vstack(mids)  # shape (M, 3)

        # 5.2 g(r) via the MDAnalysis-based helper
        start = time.time()
        ks, g_r = corr_func_real(positions, box)
        dt = time.time() - start
        print(f"   corr_func_real 耗时 {dt:.3f}s")

        # 5.3 Record axis ranges for plotting
        x_range = (float(ks.min()), float(ks.max()))
        y_range = (float(g_r.min()), float(g_r.max()))
        corr_data.append({
            'ks': ks,
            'g_r': g_r,
            'x_range': x_range,
            'y_range': y_range
        })

    # 6. Persist all results
    all_data = {'times': saved_times, 'correlation_data': corr_data}
    out_path = os.path.join(analysis_dir, 'all_corr_real.pkl')
    with open(out_path, 'wb') as f:
        pickle.dump(all_data, f)
    print(f"Saved all data to {out_path}")

    # (optional) GIF rendering; best-effort, skipped on ImportError
    try:
        create_gif_for_real(
            all_data=all_data,
            output_dir=analysis_dir,
            fps=10,
            y_range=(0, 2)  # g(r) > 0; an ideal gas gives 1
        )
    except ImportError:
        pass

    return all_data

# ------------------------- 直接计算图的一些性质 -------------------------
# 2025-05-12 计算拓扑空间的结构,利用抽样法


def analyze_and_plot_generational_space(graph, max_generation=None, analysis_topology_dir=None):
    """
    Estimate the average generational space of a graph and save a plot.

    Parameters
    ----------
    graph : nx.Graph or nx.MultiGraph
        Graph to analyse.
    max_generation : int or None
        Deepest generation to evaluate; None lets the sampler decide.
    analysis_topology_dir : str or None
        Directory for the output figure; defaults to the current directory.

    Returns
    -------
    None
    """
    curve = average_generational_space(graph, max_depth=max_generation)

    # Fall back to the working directory when no target was given.
    target_dir = os.getcwd() if analysis_topology_dir is None else analysis_topology_dir

    plt.figure(figsize=(8, 6))
    plt.plot(curve, marker='o', linestyle='-', color='b',
             label='Average Generational Space')
    plt.xlabel('Generation')
    plt.ylabel('Average Space')
    plt.title('Average Generational Space vs Generation')
    plt.legend()
    plt.grid(True)

    output_path = os.path.join(
        target_dir, 'average_generational_space.png')
    plt.savefig(output_path, dpi=300)
    plt.close()
    print(f"图像已保存至: {output_path}")

# 2025-05-18 绘制每个 edge 的 loop 分布


def compute_edge_cycle_stats(G, p=None, seed=None):
    """
    Compute the minimal-cycle-length distribution over (optionally
    sampled) edges of G.

    For each selected edge the edge is temporarily removed and the
    shortest remaining path between its endpoints is measured; the
    minimal cycle through that edge then has length path + 1.  Edges
    whose removal disconnects the endpoints get a cycle length of None.

    Parameters
    ----------
    G : nx.Graph / nx.DiGraph / nx.MultiGraph / nx.MultiDiGraph
        Input graph.  It is mutated during the computation but every
        edge is restored with its original attributes afterwards.
    p : float, optional
        Fraction of edges to sample (0 < p <= 1).  None (default)
        computes over all edges.
    seed : int, optional
        Seed for reproducible sampling.

    Returns
    -------
    dict with keys
        'cycle_lengths' : List[Optional[int]]  per sampled edge
        'distribution'  : Counter of the finite cycle lengths
        'average_length': float mean of finite lengths, or None
    """
    is_multi = G.is_multigraph()
    is_dir = G.is_directed()

    # Snapshot every edge together with a copy of its attributes so each
    # one can be removed and restored exactly.
    if is_multi:
        # MultiGraph/MultiDiGraph: (u, v, key, data)
        edges = [(u, v, key, data.copy())
                 for u, v, key, data in G.edges(data=True, keys=True)]
    else:
        # Graph/DiGraph: (u, v, None, data)
        edges = [(u, v, None, data.copy())
                 for u, v, data in G.edges(data=True)]

    # Optional sampling.  BUGFIX: use a local Random instance instead of
    # random.seed(seed), which clobbered the module-global RNG state.
    total = len(edges)
    if p is not None and 0 < p < 1:
        rng = random.Random(seed)
        k = max(1, int(total * p))
        sampled = rng.sample(edges, k)
    else:
        sampled = edges

    cycle_lengths = []

    for u, v, key, attr in sampled:
        # Remove the edge so only alternative routes are measured.
        if is_multi:
            G.remove_edge(u, v, key=key)
        else:
            G.remove_edge(u, v)

        try:
            if is_dir:
                # Directed: the cycle must return from v to u.
                path_len = nx.shortest_path_length(G, v, u)
            else:
                path_len = nx.shortest_path_length(G, u, v)
            cycle_len = path_len + 1
        except nx.NetworkXNoPath:
            cycle_len = None  # removal disconnected the endpoints

        cycle_lengths.append(cycle_len)

        # Restore the edge with its original attributes.
        if is_multi:
            G.add_edge(u, v, key=key, **attr)
        else:
            G.add_edge(u, v, **attr)

    # Distribution and mean over the finite cycle lengths only.
    finite_lengths = [c for c in cycle_lengths if c is not None]
    dist = Counter(finite_lengths)
    avg = sum(finite_lengths) / len(finite_lengths) if finite_lengths else None

    return {
        'cycle_lengths': cycle_lengths,
        'distribution': dist,
        'average_length': avg
    }


def plot_cycle_distribution(G, mission_path):
    """Plot the minimal-cycle-length distribution of G and save results.

    Runs ``compute_edge_cycle_stats`` on *G*, draws a bar chart of the
    minimum-cycle-length distribution, and writes both the figure and a
    text summary to ``<mission_path>/analysis_topology/``.

    Parameters
    ----------
    G : networkx graph
        Graph passed straight through to ``compute_edge_cycle_stats``.
    mission_path : str
        Mission directory; output files go into its 'analysis_topology'
        subdirectory (created if missing).
    """
    # Per-edge minimal-cycle statistics from the sibling helper.
    stats = compute_edge_cycle_stats(G)

    dist = stats['distribution']
    avg_len = stats['average_length']

    # Bug fix: ensure the output directory exists before any file write.
    save_dir = os.path.join(mission_path, 'analysis_topology')
    os.makedirs(save_dir, exist_ok=True)

    # Bug fix: avg_len is None when no edge lies on any cycle; guard the
    # '{:.2f}' formatting instead of raising TypeError.
    avg_label = f'{avg_len:.2f}' if avg_len is not None else 'N/A'

    # Draw the distribution as a bar chart.
    plt.figure(figsize=(10, 6))
    x = list(dist.keys())
    y = list(dist.values())

    plt.bar(x, y, alpha=0.8)
    plt.xlabel('Minimum Cycle Length')
    plt.ylabel('Count')
    plt.title(f'Minimum Cycle Distribution (Average Length: {avg_label})')

    # Mark the mean cycle length (only meaningful when it exists).
    if avg_len is not None:
        plt.axvline(x=avg_len, color='r', linestyle='--',
                    alpha=0.8, label=f'Average: {avg_len:.2f}')
        plt.legend()

    # Annotate each bar with its count.
    for i, v in enumerate(y):
        plt.text(x[i], v, str(v), ha='center', va='bottom')

    # Save the image
    image_path = os.path.join(save_dir, 'minimal_cycles_distribution.png')
    plt.savefig(image_path)
    print(image_path)
    plt.close()

    # Save statistical information to a text file with the same name
    txt_path = os.path.join(save_dir, 'minimal_cycles_distribution.txt')
    with open(txt_path, 'w', encoding='utf-8') as f:
        # Write average cycle length
        f.write(f"Average Minimum Cycle Length: {avg_label}\n")
        f.write("\nMinimum Cycle Length Distribution:\n")
        # Write distribution data
        for length, count in sorted(dist.items()):
            f.write(f"Length {length}: {count} occurrences\n")


# --------------- 2025-05-27 分析GEBC 的加权分布------------------------


def save_data_to_txt(ks, g_rel, filename):
    """Write correlation-function samples to *filename*.

    Output is tab-separated: a 'k' / 'g(k)' header line followed by one
    row per (k, g) pair.
    """
    rows = ['k\tg(k)\n']
    rows.extend(f'{k}\t{g}\n' for k, g in zip(ks, g_rel))
    with open(filename, 'w') as fh:
        fh.writelines(rows)


def plot_correlation_function(ks, g_rel, title, save_path):
    """Plot g(k) versus k and persist both the figure and the raw data.

    The figure is written to '<save_path>.png' (300 dpi, tight bounding
    box) and the samples to '<save_path>.txt' via ``save_data_to_txt``.
    """
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.plot(ks, g_rel, 'b-', label='Correlation Function')
    ax.set_xlabel('k')
    ax.set_ylabel('g(k)')
    ax.set_title(title)
    ax.grid(True)
    ax.legend()

    # Persist the figure, then release it.
    fig.savefig(save_path + '.png', dpi=300, bbox_inches='tight')
    plt.close(fig)

    # Keep the underlying numbers alongside the image.
    save_data_to_txt(ks, g_rel, save_path + '.txt')


def intial_end_GEBC_GEBC_m1_topo_PCF(base_path):
    """Compute GEBC / GEBC_m1 weighted topological pair-correlation
    functions for the first and last graph snapshots of a mission.

    Loads the alphabetically first and last files under
    ``<base_path>/graph_series/``, reduces each to its largest connected
    component as a simple graph, samples 10% of the edges, and saves the
    two weighted correlation functions (plot + data) under
    ``<base_path>/analysis_topology/corr_func_GEBC_weighted``.

    Parameters
    ----------
    base_path : str
        Mission directory containing a 'graph_series/' subdirectory of
        pickled graph snapshots.
    """
    graph_dir = os.path.join(base_path, 'graph_series/')

    # All snapshot files, sorted by name so [0] is the initial state and
    # [-1] the final one.
    files = sorted(
        f for f in os.listdir(graph_dir)
        if os.path.isfile(os.path.join(graph_dir, f))
    )

    # Bug fix: an empty directory previously produced open(None) -> TypeError.
    if not files:
        print(f"No graph files found in {graph_dir}")
        return

    graph_path_initial = os.path.join(graph_dir, files[0])
    graph_path_end = os.path.join(graph_dir, files[-1])

    output_dir = os.path.join(
        base_path, 'analysis_topology/corr_func_GEBC_weighted')

    # Create output directory if it doesn't exist
    os.makedirs(output_dir, exist_ok=True)

    # Process the initial and the final graph. (The loop variable no
    # longer shadows the directory variable, fixing a readability trap.)
    for snapshot_path, suffix in [(graph_path_initial, 'initial'),
                                  (graph_path_end, 'end')]:
        # Load graph
        with open(snapshot_path, 'rb') as f:
            graph = pickle.load(f)

        # Largest connected component as a simple undirected graph
        # (consistent with the rest of the file).
        graph = simplify_maximum_subgraph(graph)

        # Randomly sample 10% of edges (comment previously said 5% while
        # the code sampled 10% — the code's 10% is kept).
        all_edges = list(graph.edges())
        n_edges = len(all_edges)
        n_sample = int(0.1 * n_edges)

        # Sample indices rather than edge tuples so numpy does not try to
        # coerce the tuples into an array.
        edge_indices = np.random.choice(n_edges, n_sample, replace=False)
        sampled_edges = [all_edges[i] for i in edge_indices]

        # Calculate GEBC weighted correlation function
        ks_GEBC, g_rel_GEBC = topological_pair_corr(
            graph, sampled_edges,
            sample_size=50, mass_weight_flag='GEBC'
        )

        # Calculate GEBC_m1 weighted correlation function
        ks_GEBC_m1, g_rel_GEBC_m1 = topological_pair_corr(
            graph, sampled_edges,
            sample_size=50, mass_weight_flag='GEBC_m1'
        )

        # Plot and save results
        plot_correlation_function(
            ks_GEBC, g_rel_GEBC,
            f'GEBC Weighted Correlation Function ({suffix})',
            os.path.join(output_dir, f'GEBC_weighted_correlation_{suffix}')
        )

        plot_correlation_function(
            ks_GEBC_m1, g_rel_GEBC_m1,
            f'GEBC_m1 Weighted Correlation Function ({suffix})',
            os.path.join(output_dir, f'GEBC_m1_weighted_correlation_{suffix}')
        )

# ---------------------- end ------------------------

# 2025-05-28 简单的 GEBCs 的分布


def intial_end_distribution_GEBCs(mission_dir):
    """Plot initial-vs-final distributions of the edge attributes
    'GEBC' and 'GEBC_m1' as overlaid histograms.

    Figures and raw histogram counts are written to
    ``<mission_dir>/analysis_topology/GEBCs_distribution``.

    Parameters
    ----------
    mission_dir : str
        Mission directory containing a 'graph_series' subdirectory of
        pickled graph snapshots ('.pkl').
    """

    # 1. Locate the first and last .pkl snapshots in graph_series.
    graph_dir = os.path.join(mission_dir, 'graph_series')
    files = [f for f in os.listdir(graph_dir) if f.endswith('.pkl')]
    if not files:
        print(f"No pkl files found in {graph_dir}")
        return
    files.sort()
    graph_path_initial = os.path.join(graph_dir, files[0])
    graph_path_final = os.path.join(graph_dir, files[-1])

    # 2. Load the initial and final graphs.
    with open(graph_path_initial, 'rb') as f:
        G_initial = pickle.load(f)
    with open(graph_path_final, 'rb') as f:
        G_final = pickle.load(f)

    # 3. Extract edge attributes (a missing attribute counts as 0).
    def extract_gebc(graph, key):
        return [data.get(key, 0) for u, v, data in graph.edges(data=True)]

    gebc_init = extract_gebc(G_initial, 'GEBC')
    gebc_final = extract_gebc(G_final, 'GEBC')
    gebc_m1_init = extract_gebc(G_initial, 'GEBC_m1')
    gebc_m1_final = extract_gebc(G_final, 'GEBC_m1')

    # 4. Plot and dump data.
    save_dir = os.path.join(
        mission_dir, 'analysis_topology', 'GEBCs_distribution')
    os.makedirs(save_dir, exist_ok=True)

    def plot_and_save(init_data, final_data, name):
        # Shared bin range so both histograms are directly comparable.
        all_data = np.array(init_data + final_data)
        # Bug fix: np.min/np.max raise ValueError on an empty array
        # (e.g. graphs with no edges); skip instead of crashing.
        if all_data.size == 0:
            print(f"No {name} data to plot; skipping.")
            return
        bins = 50
        bin_min, bin_max = np.min(all_data), np.max(all_data)
        hist_init, bin_edges = np.histogram(
            init_data, bins=bins, range=(bin_min, bin_max))
        hist_final, _ = np.histogram(
            final_data, bins=bins, range=(bin_min, bin_max))
        bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2

        # Means (0 when one of the states has no data).
        mean_init = np.mean(init_data) if len(init_data) > 0 else 0
        mean_final = np.mean(final_data) if len(final_data) > 0 else 0

        # Overlaid translucent bars for the two states.
        plt.figure(figsize=(8, 6))
        plt.bar(bin_centers, hist_init, width=bin_edges[1]-bin_edges[0],
                alpha=0.5, label='Initial', color='skyblue', edgecolor='black')
        plt.bar(bin_centers, hist_final, width=bin_edges[1]-bin_edges[0],
                alpha=0.5, label='Final', color='salmon', edgecolor='black')
        plt.xlabel(name)
        plt.ylabel('Count')
        plt.title(f'{name} Distribution (Initial vs Final)')
        plt.grid(True)
        # Mark the mean of each state.
        plt.axvline(mean_init, color='blue', linestyle='--', linewidth=2, alpha=0.8,
                    label=f'Initial Mean: {mean_init:.3e}')
        plt.axvline(mean_final, color='red', linestyle='--', linewidth=2, alpha=0.8,
                    label=f'Final Mean: {mean_final:.3e}')
        # Build the legend once, de-duplicating repeated labels (the
        # earlier redundant plt.legend() call was removed).
        handles, labels = plt.gca().get_legend_handles_labels()
        by_label = dict(zip(labels, handles))
        plt.legend(by_label.values(), by_label.keys())
        out_png = os.path.join(save_dir, f'{name}_distribution.png')
        plt.savefig(out_png, dpi=300, bbox_inches='tight')
        plt.close()

        # Raw histogram counts plus the two means, tab-separated.
        out_txt = os.path.join(save_dir, f'{name}_distribution.txt')
        with open(out_txt, 'w') as f:
            f.write('bin_center\tinitial_count\tfinal_count\n')
            for x, y1, y2 in zip(bin_centers, hist_init, hist_final):
                f.write(f'{x}\t{y1}\t{y2}\n')
            f.write(f'Initial Mean\t{mean_init}\n')
            f.write(f'Final Mean\t{mean_final}\n')

    plot_and_save(gebc_init, gebc_final, 'GEBC')
    plot_and_save(gebc_m1_init, gebc_m1_final, 'GEBC_m1')
    # print(f"GEBC/GEBC_m1 distributions saved to {save_dir}")

# ---------------------- end ------------------------

# 2025-05-29 建立距离矩阵


def build_distance_memmap_graph_tool(L_nx, filename, map_dtype=np.int32):
    """
    Build an all-pairs shortest-path distance matrix using graph-tool's
    shortest_distance and store its lower triangle in an on-disk memmap.

    Parameters
    ----------
    L_nx : networkx.Graph
        Source graph (undirected or directed); converted to a
        graph_tool.Graph first.
    filename : str
        Path of the memmap file to write (overwritten if it exists).
    map_dtype : numpy dtype
        Integer dtype used to store distances, default int32.

    Returns
    -------
    nodes : list
        Node list in the order used for matrix row/column indices.
    dist_map : np.memmap
        (n, n) memmap where only entries [i, j] with i >= j are written
        (lower triangle). Unreachable pairs are stored as 0, which is
        indistinguishable from the diagonal's true 0 — callers must
        treat off-diagonal zeros as "no path".
    """
    # 1. Map networkx nodes to contiguous integer indices for graph-tool.
    nodes = list(L_nx.nodes())
    idx = {v: i for i, v in enumerate(nodes)}
    n = len(nodes)

    # Build the graph-tool graph with the same directedness.
    g = gt.Graph(directed=L_nx.is_directed())
    g.add_vertex(n)
    for u, v in L_nx.edges():
        gu = g.vertex(idx[u])
        gv = g.vertex(idx[v])
        g.add_edge(gu, gv)

    # 2. Create a fresh memmap and zero-initialize it.
    if os.path.exists(filename):
        os.remove(filename)
    dist_map = np.memmap(filename, dtype=map_dtype, mode='w+', shape=(n, n))
    dist_map[:] = 0   # fill everything with 0
    np.fill_diagonal(dist_map, 0)  # redundant after the full fill; kept as-is

    # 3. One single-source shortest-distance pass per vertex.
    for u, i in idx.items():
        # shortest_distance returns a vertex property map of length n.
        lengths = topology.shortest_distance(g, source=g.vertex(i))
        lengths = np.array(lengths.a, dtype=map_dtype)
        # Values equal to the dtype's max are treated as "unreachable"
        # (presumably graph-tool's sentinel — confirm for this gt version)
        # and collapsed to 0; see the caveat in the docstring.
        lengths[lengths == np.iinfo(map_dtype).max] = 0
        # Write only the lower-triangular part: row i, columns 0..i.
        dist_map[i, :i+1] = lengths[:i+1]

    dist_map.flush()
    return nodes, dist_map


def process_graph_and_save_distance(mission_path):
    """
    Process the initial graph snapshot (0-graph.pkl) of a mission
    directory: build the line-graph distance matrix and save it, along
    with the node ordering needed to interpret it.

    Parameters
    ----------
    mission_path : str
        Mission directory, e.g.
        '/home/baixj/gel_network/1-crosslink_degree_space/string=bondingprob1.0/N1/'

    Returns
    -------
    str
        Path of the saved distance-matrix file.
    """
    # Locate the snapshot directory; fail fast if it is missing.
    graph_series_dir = os.path.join(mission_path, 'graph_series')
    if not os.path.exists(graph_series_dir):
        raise FileNotFoundError(f"图数据目录不存在: {graph_series_dir}")

    # Output directory for the distance matrix.
    save_dir = os.path.join(mission_path, 'graph_distance')
    os.makedirs(save_dir, exist_ok=True)

    # Collect snapshots in name order and keep only the first one,
    # which must be the initial state (prefix '0-').
    graph_files = sorted(
        f for f in os.listdir(graph_series_dir) if f.endswith('-graph.pkl'))
    if not graph_files:
        raise FileNotFoundError(f"未找到图数据文件: {graph_series_dir}")

    graph_file = graph_files[0]
    if not graph_file.startswith('0-'):
        raise ValueError(f"第一个文件不是初始图数据: {graph_file}")

    graph_path = os.path.join(graph_series_dir, graph_file)
    print(f"处理初始图文件: {graph_file}")

    try:
        # Load the pickled graph, reduce it to its largest component,
        # and take its line graph (edges become nodes).
        with open(graph_path, 'rb') as f:
            graph = pickle.load(f)
        graph = simplify_maximum_subgraph(graph)
        line_graph = nx.line_graph(graph)

        # Compute and persist the distance matrix.
        save_path = os.path.join(save_dir, '0-distance.dat')
        nodes, dist_map = build_distance_memmap_graph_tool(
            line_graph, save_path)

        # Persist the node ordering for later matrix lookups.
        nodes_path = os.path.join(save_dir, '0-nodes.pkl')
        with open(nodes_path, 'wb') as f:
            pickle.dump(nodes, f)

        print(f"已保存距离矩阵: {save_path}")
        return save_path

    except Exception as e:
        print(f"处理文件 {graph_file} 时出错: {str(e)}")
        raise


# ---------------------- end 2025-05-29 ------------------------

def simplify_maximum_subgraph(graph):
    """
    Reduce a graph to its largest connected component as a simple
    undirected graph.

    Parameters
    ----------
    graph : networkx.Graph or networkx.MultiGraph
        Input graph.

    Returns
    -------
    networkx.Graph
        Largest connected component with parallel edges collapsed.
    """
    biggest = max(nx.connected_components(graph), key=len)
    # nx.Graph(...) collapses any MultiGraph parallel edges.
    return nx.Graph(graph.subgraph(biggest).copy())
