import os
import time
from concurrent.futures import ProcessPoolExecutor, as_completed
import pickle
import networkx as nx
import random
import math
import matplotlib.pyplot as plt

import declares
import GraphTools.graph_method as gm
import GraphTools.frame_series_method as fm
import GraphTools.network_method as nm
import GraphTools.basic_method as bm

# Functions that break a graph that carries coordinates. Input: one graph.
# Output: the graph after breaking.
# Inputs include N_break and method = 'linear' or 'boltzmann'.
# The graph should carry several macroscopic attributes.


def extract_PBC_edges(graph, graph_mode='3D'):
    """2025-04-12
    Find the edges that cross the periodic boundary, dispatching to the
    2D-lattice or 3D real-network implementation.

    :param graph: NetworkX graph with a 'box' graph attribute.
    :param graph_mode: '2D' for lattice graphs, '3D' for real networks.
    :return: whatever the mode-specific extractor returns, i.e. a tuple of
             (plot-coordinate data, pbc_edges).
    :raises ValueError: if graph_mode is neither '2D' nor '3D'. (Previously
             an error string was returned, which callers could silently
             unpack as if it were the normal result tuple.)
    """
    if graph_mode == '2D':
        # 2D lattice network
        return extract_edges_2D(graph)
    elif graph_mode == '3D':
        # 3D real network
        return extract_edges_real_network(graph)
    else:
        raise ValueError(
            f"unknown graph_mode: {graph_mode!r} (expected '2D' or '3D')")


def extract_edges_real_network(graph):
    """
    Extract every edge of the graph and split them into edges that do not
    cross the periodic boundary (PBC) and edges that do.

    The atoms of the polymer chain stored on each edge ('u_atom_list') supply
    x coordinates which are passed to bm.trajectory_endpoints_side to decide
    whether the chain's endpoints lie on the same side or on different sides
    of the box in x, i.e. whether the chain crosses the PBC in x.

    Input:
        graph: a NetworkX MultiGraph whose node IDs match the atom IDs of the
               data file (counted from 1). Each edge attribute dict looks like:
               {
                   0: {
                       'chain_id': '5972',
                       'chain_len': 15,
                       'atomlist': [1683, 92580, ..., 1527],
                       'bondlist': [...],
                       'u_atom_list': {
                           1683: <Atom 1683: ...>,
                           92580: <Atom 92580: ...>,
                           ...,
                           1527: <Atom 1527: ...>
                       }
                   }
               }

    Output:
        A tuple ((non_pbc_edges_x, non_pbc_edges_y, non_pbc_edges_z), pbc_edges)
        where non_pbc_edges_* are coordinate lists ready for plotting (each
        edge contributes its start and end coordinates followed by None as a
        separator) and pbc_edges lists every PBC-crossing edge as
        (start_node, end_node, key), ordered so the start node has the
        smaller x coordinate and the end node the larger one.
    """
    non_pbc_edges_x = []
    non_pbc_edges_y = []
    non_pbc_edges_z = []
    pbc_edges = []

    box = bm.get_box_dimensions(graph.graph['box'])
    Lx = box[0]  # box length in x
    # NOTE: graph is a MultiGraph, so iterate with edges(keys=True, data=True)
    for n1, n2, key, edge_attr in graph.edges(keys=True, data=True):
        # A node pair may carry several chains (distinguished by key)
        atomlist = edge_attr.get('atomlist')
        u_atom_list = edge_attr.get('u_atom_list')
        if not atomlist or not u_atom_list:
            continue
        # At least two atoms are needed to judge the first bond segment
        if len(atomlist) < 2:
            continue

        # Decide whether the chain's endpoints are on the same side in x
        posi_xs = [u_atom_list[atom_id].position[0] for atom_id in atomlist]
        cross_flag = bm.trajectory_endpoints_side(posi_xs, Lx)

        atom1 = u_atom_list[atomlist[0]]

        if cross_flag == 'different':
            # Endpoints on different sides: the chain crosses the PBC
            # Fetch the atom objects for the edge endpoints n1 and n2
            atom_n1 = u_atom_list.get(n1)
            atom_n2 = u_atom_list.get(n2)
            if atom_n1 is None or atom_n2 is None:
                #! check
                print('error: atom_n1 or atom_n2 is None')
                continue
            # Order by x coordinate: smaller x first, larger x second
            if atom_n1.position[0] <= atom_n2.position[0]:
                pbc_edges.append((n1, n2, key))
            else:
                pbc_edges.append((n2, n1, key))
        else:
            # Non-PBC edge: plot it using the chain's first/last atom coords
            pos1 = atom1.position  # assumed to be [x, y, z]
            atom_last = u_atom_list.get(atomlist[-1])
            if atom_last is None:
                continue
            pos2 = atom_last.position
            non_pbc_edges_x.extend([pos1[0], pos2[0], None])
            non_pbc_edges_y.extend([pos1[1], pos2[1], None])
            non_pbc_edges_z.extend([pos1[2], pos2[2], None])

    return (non_pbc_edges_x, non_pbc_edges_y, non_pbc_edges_z), pbc_edges


def extract_edges_2D(graph):
    """
    Build the periodic-boundary edges of a regular 2D grid:
    for every row y, the node at maximum x, (grid_x-1, y), wraps around to
    the node at x=0, (0, y). Edges are oriented so the node with the smaller
    x coordinate comes first.

    Parameters:
        graph : a NetworkX graph whose nodes are (x, y) tuples and whose
                'box' graph attribute carries the grid size at indices 3 and 4.

    Returns:
        ((0, 0, 0), periodic_edges) where periodic_edges is a list of
        (node_left, node_right, key) triples with node_left == (0, y),
        node_right == (grid_x-1, y) and key fixed at 0 (adjust if the grid
        ever carries parallel edges).
    """
    # Grid dimensions live in the 'box' graph attribute
    grid_x, grid_y = graph.graph['box'][3], graph.graph['box'][4]

    periodic_edges = []
    for row in range(grid_y):
        west, east = (0, row), (grid_x - 1, row)
        # Both endpoints must exist before probing for the wrap-around edge
        if west not in graph or east not in graph:
            continue
        # Accept the edge in either orientation (covers directed graphs too)
        if graph.has_edge(west, east) or graph.has_edge(east, west):
            # Smaller-x node first, as required; key defaults to 0
            periodic_edges.append((west, east, 0))
    # First element mirrors the 3D extractor's plot-data slot (placeholder)
    return (0, 0, 0), periodic_edges



def remove_pbc_edges(graph, pbc_edges):
    """
    Build a copy of the graph with every PBC-crossing edge removed.

    :param graph: the original MultiGraph
    :param pbc_edges: PBC-crossing edges, each a (start_id, end_id, key) triple
    :return: the pruned copy (graph_uPBC_x)
    """
    pruned = graph.copy()  # work on a copy so the caller's graph stays intact
    for u, v, key in pbc_edges:
        # Skip triples that are not present (e.g. already removed)
        if not pruned.has_edge(u, v, key):
            continue
        pruned.remove_edge(u, v, key)
    return pruned


def shift_graph_x(graph, Lx):
    """
    Copy the graph and shift every node's x coordinate by Lx; return the
    shifted graph plus the node mapping.

    :param graph: the graph with PBC edges already removed (graph_uPBC_x)
    :param Lx: box length in the x direction
    :return: (shifted graph, mapping). The mapping is bidirectional: it holds
             both original ID -> shifted ID and shifted ID -> original ID.
    """
    import networkx as nx
    new_graph = nx.MultiGraph()
    mapping = {}

    # Copy all nodes, shifting their coordinates.
    # detect_position_key is presumably defined elsewhere in this module and
    # returns the node-attribute key holding the (x, y, z) position — confirm.
    position_key = detect_position_key(graph)
    for node, data in graph.nodes(data=True):
        new_node = f"{node}_shifted"  # new node ID
        mapping[node] = new_node
        mapping[new_node] = node
        new_data = data.copy()  # shallow copy: nested attribute objects stay shared
        # Node coordinates are stored under position_key as (x, y, z)
        if position_key in new_data:
            x, y, z = new_data[position_key]
            new_data[position_key] = (x + Lx, y, z)
        new_graph.add_node(new_node, **new_data)

    # Copy all edges, re-targeting them at the shifted node IDs
    for u, v, key, data in graph.edges(data=True, keys=True):
        new_u = mapping[u]
        new_v = mapping[v]
        new_graph.add_edge(new_u, new_v, key=key, **data)

    return new_graph, mapping


def merge_graphs(graph1, graph2):
    """
    Merge graph2's nodes and edges into graph1, in place.

    :param graph1: the base graph (already stripped of PBC edges); mutated
    :param graph2: the shifted copy whose content is absorbed
    :return: graph1, now containing both graphs
    """
    incoming_nodes = graph2.nodes(data=True)
    incoming_edges = graph2.edges(data=True, keys=True)
    graph1.add_nodes_from(incoming_nodes)
    graph1.add_edges_from(incoming_edges)
    return graph1


def add_pbc_edges(merged_graph, pbc_edges, mapping):
    """
    Reconnect the PBC-crossing edges across the seam of the merged graph:
    each broken edge (start, end, key) becomes an edge from the original end
    node to the shifted copy of the start node.

    :param merged_graph: the merged (original + shifted) graph; mutated
    :param pbc_edges: PBC-crossing edges as (start_id, end_id, key) triples
    :param mapping: original node ID -> shifted node ID dictionary
    :return: (largest connected subgraph, set of nodes outside it)
    """
    for start_id, end_id, key in pbc_edges:
        shifted_start = mapping.get(start_id)
        # Both endpoints must exist in the merged graph before wiring them up
        if not (merged_graph.has_node(end_id) and merged_graph.has_node(shifted_start)):
            print('error!1123ddsf34')
            continue
        merged_graph.add_edge(end_id, shifted_start, key)
    return retain_largest_subgraph(merged_graph)


def PBCx_graph(graph, graph_mode='2D'):
    """
    Build a merged graph: break the PBC edges in x, copy the whole graph
    shifted by Lx to the right, and reconnect across the seam.

    :param graph: original MultiGraph with a 'box' graph attribute
    :param graph_mode: '2D' or '3D', forwarded to extract_PBC_edges.
                       BUG FIX: this parameter used to be ignored ('2D' was
                       hard-coded); the default is now '2D' so existing
                       callers keep the old behavior.
    :return: merged graph carrying 'mapping', 'node_in_subgraph' and
             'pbc_edges' as graph attributes.
    """
    # The first element is drawable coordinate data; only pbc_edges matter here
    _plot_data, pbc_edges = extract_PBC_edges(graph, graph_mode=graph_mode)
    box = graph.graph['box']
    x_min, y_min, z_min, x_max, y_max, z_max = box[:6]
    Lx = x_max - x_min

    # Step 1: remove the periodic edges
    graph_uPBC_x = remove_pbc_edges(graph, pbc_edges)

    # Step 2: copy the graph and shift it along x
    graph_uPBC_x_move, mapping = shift_graph_x(graph_uPBC_x, Lx)

    # Step 3: merge the original and shifted graphs
    merged_graph = merge_graphs(graph_uPBC_x, graph_uPBC_x_move)

    # Step 4: re-establish the periodic connections across the seam
    merged_graph, node_in_subgraph = add_pbc_edges(
        merged_graph, pbc_edges, mapping)

    # Record bookkeeping info on the merged graph
    merged_graph.graph['mapping'] = mapping
    merged_graph.graph['node_in_subgraph'] = node_in_subgraph
    merged_graph.graph['pbc_edges'] = pbc_edges
    return merged_graph

# 2025-03-18 Keep only the largest connected subgraph and drop the rest.


def retain_largest_subgraph(G):
    """
    Return only the largest connected component of G.

    :param G: an undirected NetworkX graph
    :return: tuple (subgraph, node_in_subgraph) where subgraph is a copy of
             the largest connected component and node_in_subgraph is the set
             of all nodes belonging to the dropped components.
             BUG FIX: the empty-graph case used to return the bare graph
             instead of a 2-tuple, breaking every caller that unpacks two
             values (e.g. add_pbc_edges, break_PBC_graph).
    """
    if len(G.nodes()) == 0:
        return G, set()  # empty graph: nothing was dropped

    # All connected components, each a set of nodes
    components = list(nx.connected_components(G))
    # The component with the most nodes
    largest_component = max(components, key=len)

    # Collect the nodes of every other component (deduplicated via a set)
    node_in_subgraph = set(node for component in components if component !=
                           largest_component for node in component)
    # Return a fresh copy of the largest component's subgraph
    return G.subgraph(largest_component).copy(), node_in_subgraph

# 2025-03-20


def normalize_edge(edge):
    """
    Canonicalise an undirected edge so (u, v) and (v, u) map to the same key.
    Directed callers simply get the original orientation back when u <= v.
    """
    first, second = edge
    if first <= second:
        return (first, second)
    return (second, first)


def generate_all_shortest_paths(s, t, predecessors):
    """
    Enumerate every shortest path from s to t by recursively walking the
    predecessor tree backwards from t.

    :param predecessors: dict produced by nx.predecessor(graph, s)
    :return: list of node sequences, one per shortest path; empty list when
             t is unreachable from s.
    """
    if s == t:
        return [[s]]
    if t not in predecessors:
        return []  # unreachable
    return [
        prefix + [t]
        for parent in predecessors[t]
        for prefix in generate_all_shortest_paths(s, parent, predecessors)
    ]


def compute_short_paths_and_edge_mapping_no_parallel(graph):
    """
    Compute all shortest paths between every ordered node pair of graph and
    build the reverse edge -> path index, sequentially.

    Returns:
      - short_paths: dict keyed by (s, t) whose value is the list of all
        shortest s -> t paths (each path a node sequence)
      - edge_to_paths: dict keyed by normalized edge whose value is a list of
        ((s, t), path_index) occurrences
    """
    short_paths = {}
    edge_to_paths = {}

    node_list = list(graph.nodes())
    for source in node_list:
        reachable = nx.single_source_shortest_path_length(graph, source)
        preds = nx.predecessor(graph, source)

        for target in node_list:
            if target == source or target not in reachable:
                continue
            all_paths = generate_all_shortest_paths(source, target, preds)
            if not all_paths:
                continue
            short_paths[(source, target)] = all_paths
            for idx, node_seq in enumerate(all_paths):
                # Walk the path as consecutive node pairs (= its edges)
                for a, b in zip(node_seq, node_seq[1:]):
                    edge_to_paths.setdefault(
                        normalize_edge((a, b)), []).append(((source, target), idx))
    return short_paths, edge_to_paths


# process_source is defined at module top level so it can be pickled for multiprocessing


def process_source(args):
    """
    Worker for the parallel all-pairs shortest-path computation: handle one
    source node s and return that source's share of the results.

    :param args: tuple (graph, s) — the (pickled) graph and the source node
    :return: (local_short_paths, local_edge_to_paths), same layout as the
             sequential variant's results but restricted to pairs starting
             at s.
    """
    start_time = time.time()  # Start timing
    graph, s = args
    local_short_paths = {}
    local_edge_to_paths = {}
    distances = nx.single_source_shortest_path_length(graph, s)
    predecessors = nx.predecessor(graph, s)
    for t in graph.nodes():
        if s == t or t not in distances:
            continue
        paths = generate_all_shortest_paths(s, t, predecessors)
        if paths:
            local_short_paths[(s, t)] = paths
            for i, path in enumerate(paths):
                # Convert the node sequence into an edge sequence
                edge_path = [(path[k], path[k+1])
                             for k in range(len(path) - 1)]
                for edge in edge_path:
                    edge_norm = normalize_edge(edge)
                    local_edge_to_paths.setdefault(
                        edge_norm, []).append(((s, t), i))
    end_time = time.time()  # End timing
    # print(f"Time cost for processing source {s}: {end_time - start_time:.4f} seconds")
    return local_short_paths, local_edge_to_paths


def compute_short_paths_and_edge_mapping_parallel(graph):
    """
    Compute all shortest paths between every ordered node pair in parallel
    (one task per source node) and build the reverse edge -> path index.

    Returns:
      - short_paths: dict keyed by (s, t) whose value is the list of all
        shortest s -> t paths (each path a node sequence)
      - edge_to_paths: dict keyed by normalized edge whose value is a list of
        ((s, t), path_index) occurrences
    """
    short_paths = {}
    edge_to_paths = {}

    # os.cpu_count() may return None on exotic platforms; fall back to 1 for
    # consistency with compute_short_paths_and_edge_mapping_parallel_2
    num_cores = os.cpu_count() or 1
    print("可用的最大核心数:", num_cores)

    # One task per source node; the graph is pickled into every task
    tasks = [(graph, s) for s in graph.nodes()]
    with ProcessPoolExecutor(max_workers=num_cores) as executor:
        # The task objects were never read back, so a plain list suffices
        futures = [executor.submit(process_source, task) for task in tasks]
        for future in as_completed(futures):
            sp, etp = future.result()
            short_paths.update(sp)
            for edge, occurrences in etp.items():
                edge_to_paths.setdefault(edge, []).extend(occurrences)
    return short_paths, edge_to_paths


def process_source_batch_1(args):
    """
    Worker that processes one batch of source nodes.

    Input:
      - args: tuple (graph, sources) — the graph object and a list of source
        nodes assigned to this batch
    Output:
      - local_short_paths: dict keyed by (s, t) -> list of all shortest
        s -> t paths (each path a node sequence)
      - local_edge_to_paths: dict keyed by normalized edge -> list of
        ((s, t), path_index)
    """
    start_time = time.time()  # start timing
    graph, sources = args
    local_short_paths = {}
    local_edge_to_paths = {}
    for s in sources:
        distances = nx.single_source_shortest_path_length(graph, s)
        predecessors = nx.predecessor(graph, s)
        for t in graph.nodes():
            if s == t or t not in distances:
                continue
            paths = generate_all_shortest_paths(s, t, predecessors)
            if paths:
                local_short_paths[(s, t)] = paths
                for i, path in enumerate(paths):
                    # Convert the node sequence into an edge sequence
                    edge_path = [(path[k], path[k+1])
                                 for k in range(len(path) - 1)]
                    for edge in edge_path:
                        edge_norm = normalize_edge(edge)
                        local_edge_to_paths.setdefault(
                            edge_norm, []).append(((s, t), i))
    end_time = time.time()  # stop timing
    # print(f"批次 {sources} 的处理耗时: {end_time - start_time:.4f} 秒")
    return local_short_paths, local_edge_to_paths


def process_source_batch_2(args):
    """
    Process a batch of (s, t) node pairs; precondition: the graph is
    undirected and every pair satisfies s < t.
    Uses networkx.all_shortest_paths to compute all shortest paths directly
    and builds the reverse index (edge_to_paths) with normalized edges.

    Returns:
      local_short_paths: dict keyed by (s, t) ->
          { "paths": [list of paths], "count": number of shortest paths }.
      local_edge_to_paths: dict keyed by normalized edge ->
          list of ((s, t), path_index).
    """
    import networkx as nx
    graph, pairs = args  # pairs is a list of (s, t) tuples with s < t
    local_short_paths = {}
    local_edge_to_paths = {}
    for s, t in pairs:
        try:
            paths = list(nx.all_shortest_paths(graph, source=s, target=t))
        except nx.NetworkXNoPath:
            continue  # skip unreachable pairs
        if paths:
            # Store the path list together with the path count
            local_short_paths[(s, t)] = {"paths": paths, "count": len(paths)}
            for i, path in enumerate(paths):
                # Convert the path into an edge sequence
                edge_path = [(path[k], path[k+1])
                             for k in range(len(path) - 1)]
                for edge in edge_path:
                    # Normalize so (u, v) and (v, u) share one key
                    edge_norm = normalize_edge(edge)
                    local_edge_to_paths.setdefault(
                        edge_norm, []).append(((s, t), i))
    return local_short_paths, local_edge_to_paths


def compute_short_paths_and_edge_mapping_parallel_1(graph):
    """
    Compute all shortest paths between every ordered node pair in parallel,
    batching the source nodes so each worker gets one contiguous batch, and
    build the reverse edge -> path index.

    Returns:
      - short_paths: dict keyed by (s, t) -> list of all shortest s -> t
        paths (each path a node sequence)
      - edge_to_paths: dict keyed by normalized edge -> list of
        ((s, t), path_index)
    """
    short_paths = {}
    edge_to_paths = {}

    # os.cpu_count() may return None; fall back to 1 (consistent with
    # compute_short_paths_and_edge_mapping_parallel_2). A None value used to
    # crash the batch-size division below with a TypeError.
    num_cores = os.cpu_count() or 1
    print("可用的最大核心数:", num_cores)

    # Split the sources evenly: batch size is ceil(num_nodes / num_cores)
    nodes = list(graph.nodes())
    num_nodes = len(nodes)
    if num_nodes == 0:
        # Empty graph: batch_size would be 0 and range(..., 0) raises ValueError
        return short_paths, edge_to_paths
    batch_size = math.ceil(num_nodes / num_cores)
    print("每个批次的节点数:", batch_size)

    # Partition the source nodes into batches of batch_size each
    tasks = [(graph, nodes[i:i + batch_size])
             for i in range(0, num_nodes, batch_size)]

    with ProcessPoolExecutor(max_workers=num_cores) as executor:
        futures = {executor.submit(
            process_source_batch_1, task): task for task in tasks}
        for future in as_completed(futures):
            sp, etp = future.result()
            short_paths.update(sp)
            for edge, mapping in etp.items():
                edge_to_paths.setdefault(edge, []).extend(mapping)
    return short_paths, edge_to_paths


def compute_short_paths_and_edge_mapping_parallel_2(graph):
    """
    For an undirected graph, compute all shortest paths for every (s, t)
    pair with s < t, processing the N*(N-1)/2 pairs in parallel batches so
    the work is spread evenly over the CPUs, and build the reverse
    edge -> path index.

    Returns:
      short_paths: dict keyed by (s, t) (with s < t) ->
          { "paths": [shortest paths], "count": number of paths }
      edge_to_paths: dict keyed by normalized edge ->
          list of ((s, t), path_index)
    """
    import math
    from concurrent.futures import ProcessPoolExecutor, as_completed
    # Fix the node order so the s < t convention is well defined
    nodes = sorted(graph.nodes())
    pairs = []
    # Build every (s, t) pair with s < t
    for i, s in enumerate(nodes):
        for t in nodes[i+1:]:
            pairs.append((s, t))

    short_paths = {}
    edge_to_paths = {}

    total_pairs = len(pairs)
    if total_pairs == 0:
        # Fewer than two nodes: batch_size would be 0 and range(..., 0)
        # raises ValueError
        return short_paths, edge_to_paths
    num_cores = os.cpu_count() or 1
    batch_size = math.ceil(total_pairs / num_cores)
    # Split the (s, t) pairs into batches, one batch per worker task
    tasks = [(graph, pairs[i:i+batch_size])
             for i in range(0, total_pairs, batch_size)]

    with ProcessPoolExecutor(max_workers=num_cores) as executor:
        futures = {executor.submit(
            process_source_batch_2, task): task for task in tasks}
        for future in as_completed(futures):
            sp, etp = future.result()
            short_paths.update(sp)
            for edge, mapping in etp.items():
                edge_to_paths.setdefault(edge, []).extend(mapping)
    return short_paths, edge_to_paths


def update_short_paths_and_edge_mapping(sp, edge_to_paths, broken_edges, graph):
    """
    Adjust the precomputed sp and edge_to_paths after some edges are broken
    (broken_edges may use either orientation; they are normalized here),
    removing every shortest path that contains a broken edge.

    Procedure:
      1. For each (s, t) pair in sp, drop every path containing a broken edge.
      2. If a pair has no paths left, recompute its shortest paths on the
         updated graph; if there is still none, delete the (s, t) key from sp
         entirely (matching what a full recomputation would produce).
      3. For every changed pair, delete all of its records from
         edge_to_paths, then rebuild the records from the new sp.

    Returns the updated sp and edge_to_paths.
    """
    # Normalized set of broken edges
    broken_set = set(normalize_edge(e) for e in broken_edges)
    affected_path = set()  # records which (s, t) pairs were updated

    # Update the path list of every (s, t) pair in sp
    for pair in list(sp.keys()):
        s, t = pair
        original_paths = sp[pair]
        new_paths = []
        for path in original_paths:
            # All (normalized) edges on this path
            edge_path = [normalize_edge((path[i], path[i+1]))
                         for i in range(len(path) - 1)]
            # Keep the path only if it contains no broken edge
            if not any(e in broken_set for e in edge_path):
                new_paths.append(path)
        # An update is needed whenever the path count changed
        if len(new_paths) != len(original_paths):
            # No surviving paths: recompute all shortest paths for (s, t)
            if not new_paths:
                distances = nx.single_source_shortest_path_length(graph, s)
                if t in distances:
                    preds = nx.predecessor(graph, s)
                    new_paths = generate_all_shortest_paths(s, t, preds)
                else:
                    new_paths = []
            # Still no path after recomputation: drop the (s, t) key
            # (consistent with a full recomputation)
            if not new_paths:
                del sp[pair]
            else:
                sp[pair] = new_paths
            affected_path.add(pair)

    # Update edge_to_paths: first delete every record that refers to an
    # affected (s, t) pair
    for edge in list(edge_to_paths.keys()):
        new_occurrences = [occ for occ in edge_to_paths[edge]
                           if occ[0] not in affected_path]
        if new_occurrences:
            edge_to_paths[edge] = new_occurrences
        else:
            del edge_to_paths[edge]

    # Rebuild the edge records of every updated (s, t) pair
    for pair in affected_path:
        # Only pairs still present in sp get new records
        if pair in sp:
            s, t = pair
            for idx, path in enumerate(sp[pair]):
                edge_path = [normalize_edge((path[i], path[i+1]))
                             for i in range(len(path) - 1)]
                for edge in edge_path:
                    edge_to_paths.setdefault(edge, []).append((pair, idx))

    return sp, edge_to_paths


def update_short_paths_and_edge_mapping_2(sp, edge_to_paths, broken_edges, graph):
    """
    Adjust the precomputed sp and edge_to_paths after some edges are broken
    (edges may arrive in either orientation; they are normalized here).
    Uses the ((s, t), path_index) records stored in edge_to_paths to delete
    the affected shortest paths from sp directly, without scanning all of sp.

    sp structure: { (s,t): {"paths": [paths], "count": path count} }, s < t.
    edge_to_paths structure: { normalized_edge: [ ((s,t), path_index), ... ] }
    """
    import networkx as nx
    # Normalized set of broken edges
    broken_set = set(normalize_edge(e) for e in broken_edges)

    # updates: for each (s, t) pair, the set of path indices to drop
    updates = {}
    for edge in broken_set:
        if edge in edge_to_paths:
            for (pair, path_index) in edge_to_paths[edge]:
                updates.setdefault(pair, set()).add(path_index)

    affected_path = set(updates.keys())

    # Drop the marked paths from sp for every affected (s, t) pair
    for pair in affected_path:
        if pair not in sp:
            continue
        s, t = pair
        sp_entry = sp[pair]
        old_paths = sp_entry["paths"]
        # Filter out the paths marked for removal
        new_paths = [p for idx, p in enumerate(
            old_paths) if idx not in updates[pair]]
        # If nothing survived, recompute the shortest paths for this pair
        if not new_paths:
            try:
                new_paths = list(nx.all_shortest_paths(
                    graph, source=s, target=t))
            except nx.NetworkXNoPath:
                new_paths = []
        if not new_paths:
            # Still no path after recomputation: drop the (s, t) pair
            del sp[pair]
        else:
            sp[pair] = {"paths": new_paths, "count": len(new_paths)}

    # Update edge_to_paths: first delete every record of an affected pair
    for edge in list(edge_to_paths.keys()):
        new_occurrences = [occ for occ in edge_to_paths[edge]
                           if occ[0] not in affected_path]
        if new_occurrences:
            edge_to_paths[edge] = new_occurrences
        else:
            del edge_to_paths[edge]

    # Rebuild the edge records of every updated pair still present in sp
    for pair in affected_path:
        if pair not in sp:
            continue
        new_paths = sp[pair]["paths"]
        for idx, path in enumerate(new_paths):
            edge_path = [normalize_edge((path[i], path[i+1]))
                         for i in range(len(path) - 1)]
            for edge in edge_path:
                edge_to_paths.setdefault(edge, []).append((pair, idx))

    return sp, edge_to_paths


def update_short_paths_and_edge_mapping_3(sp, edge_to_paths, broken_edges, graph):
    """
    Like update_short_paths_and_edge_mapping_2: adjust sp and edge_to_paths
    after some edges are broken (normalized internally with normalize_edge),
    using the ((s, t), path_index) records in edge_to_paths to delete the
    affected paths without scanning all of sp.

    sp structure: { (s,t): {"paths": [paths], "count": path count} } (s < t).
    edge_to_paths structure: { normalized_edge: [ ((s,t), path_index), ... ] }

    Additionally collects every edge appearing on a removed path (i.e. on a
    path that contained a broken edge) and returns that set.

    Returns: (sp, edge_to_paths, affected_path, removed_edges)
    """
    import networkx as nx
    # Normalized set of broken edges
    broken_set = set(normalize_edge(e) for e in broken_edges)

    # updates: for each (s, t) pair, the set of path indices to delete
    updates = {}
    for edge in broken_set:
        if edge in edge_to_paths:
            for (pair, path_index) in edge_to_paths[edge]:
                updates.setdefault(pair, set()).add(path_index)
        else:
            # unexpected: a broken edge should always appear in the index
            print('不应该有这个情况')

    affected_path = set(updates.keys())
    removed_edges = set()  # edges that occurred on removed paths

    # Process the paths of every affected (s, t) pair
    for pair in affected_path:
        if pair not in sp:
            continue
        s, t = pair
        sp_entry = sp[pair]
        old_paths = sp_entry["paths"]
        # Split into removed paths and surviving paths
        removed_paths = [p for idx, p in enumerate(
            old_paths) if idx in updates[pair]]
        remaining_paths = [p for idx, p in enumerate(
            old_paths) if idx not in updates[pair]]

        # Record every edge of every removed path
        for path in removed_paths:
            for i in range(len(path) - 1):
                removed_edges.add(normalize_edge((path[i], path[i+1])))

        # If nothing survived, recompute the shortest paths for this pair
        if not remaining_paths:
            try:
                new_paths = list(nx.all_shortest_paths(
                    graph, source=s, target=t))
            except nx.NetworkXNoPath:
                new_paths = []
            if new_paths:
                sp[pair] = {"paths": new_paths, "count": len(new_paths)}
            else:
                del sp[pair]
        else:
            sp[pair] = {"paths": remaining_paths,
                        "count": len(remaining_paths)}

    # Update edge_to_paths: delete every record of an affected (s, t) pair
    for edge in list(edge_to_paths.keys()):
        new_occurrences = [occ for occ in edge_to_paths[edge]
                           if occ[0] not in affected_path]
        if new_occurrences:
            edge_to_paths[edge] = new_occurrences
        else:
            del edge_to_paths[edge]

    # Rebuild the edge records of every updated pair still present in sp
    for pair in affected_path:
        if pair not in sp:
            continue
        new_paths = sp[pair]["paths"]
        for idx, path in enumerate(new_paths):
            edge_path = [normalize_edge((path[i], path[i+1]))
                         for i in range(len(path) - 1)]
            for edge in edge_path:
                edge_to_paths.setdefault(edge, []).append((pair, idx))

    # Return the updated sp, edge_to_paths, the affected (s, t) pairs, and
    # the edges of the removed paths
    return sp, edge_to_paths, affected_path, removed_edges


def compute_gebc_from_short_paths(short_paths, edge_to_paths, graph):
    """
    Compute the raw edge betweenness centrality (GEBC) from the precomputed
    shortest-path data:
        GEBC(e) = sum over (s,t) of
                  (# shortest s-t paths containing e) / (# shortest s-t paths).
    Undirected graphs visit every pair twice (as (s,t) and (t,s)), so the
    totals are halved for undirected input.

    :return: dict mapping each normalized edge to its GEBC value.
    """
    gebc = {normalize_edge(e): 0.0 for e in graph.edges()}

    for norm_edge, occurrences in edge_to_paths.items():
        for pair, _path_idx in occurrences:
            gebc[norm_edge] += 1.0 / len(short_paths[pair])

    # Undirected graphs double-count every pair; halve the accumulated sums
    if not graph.is_directed():
        for norm_edge in gebc:
            gebc[norm_edge] /= 2.0
    return gebc


def compute_gebc_from_short_paths_1(short_paths, edge_to_paths, graph):
    """
    Compute GEBC from the precomputed short_paths and edge_to_paths.
    Formula: GEBC(e) = sum over (s,t) [ (# SPs containing e) / (total # SPs) ].
    Undirected graphs visit every ordered pair twice, so divide by 2.

    Optimizations:
      1. Pre-compute the path count of every (s, t) pair so the inner loop
         does not call len(short_paths[s_t]) repeatedly;
      2. If the update logic allowed it, short_paths[(s,t)] could be turned
         into a set (e.g. of frozensets) for fast deletion of specific paths,
         but the list-and-index layout is kept for compatibility with
         update_short_paths_and_edge_mapping;
      3. edge_to_paths keeps its structure, storing (s,t) and path_index so a
         path can be referenced directly.

    Returns:
      dict keyed by normalized edge -> GEBC value.
    """
    # Initialize the GEBC of every edge to 0.0
    gebc = {normalize_edge(edge): 0.0 for edge in graph.edges()}

    start_time = time.time()  # start timing

    # Cache the path count of every (s, t) pair to avoid repeated len() calls
    sp_counts = {pair: len(paths) for pair, paths in short_paths.items()}
    print(f"Graph 总节点数: {graph.number_of_nodes()}")
    end_time = time.time()  # stop timing
    print(f"short_paths 长度: {len(short_paths)}")
    print(f"edge_to_paths 长度: {len(edge_to_paths)}")
    print(f"代码运行时间: {end_time - start_time:.4f} 秒")

    # Walk all edge_to_paths records, accumulating each (s, t) contribution
    for e_norm, occurrences in edge_to_paths.items():
        for (s_t, path_index) in occurrences:
            total_paths = sp_counts[s_t]

            gebc[e_norm] += 1.0 / total_paths
            # #! a strange bug?
            # gebc[e_norm] += (path_index+ 1) / total_paths

    # Undirected graphs count every (s, t) pair twice; divide by 2
    if not graph.is_directed():
        for e in gebc:
            gebc[e] /= 2.0

    return gebc


def compute_gebc_from_short_paths_2(short_paths, edge_to_paths, graph):
    """
    Compute GEBC with the newer sp data structure.

    sp structure:
      { (s,t): {"paths": [shortest paths], "count": number of paths} }
    edge_to_paths structure:
      { normalized_edge: [ ((s,t), path_index), ... ] }

    GEBC(e) = sum over (s,t) of
              (occurrences of e on the (s,t) shortest paths) / (path count).
    NOTE: unlike compute_gebc_from_short_paths, no halving is applied here —
    the sp produced by the parallel_2 pipeline stores each unordered pair
    exactly once (s < t), so each pair is already counted a single time.
    """
    # Initialize the GEBC of every edge to 0.0
    gebc = {normalize_edge(edge): 0.0 for edge in graph.edges()}

    # Walk every record of edge_to_paths
    for e_norm, occurrences in edge_to_paths.items():
        for (s_t, path_index) in occurrences:
            total_paths = short_paths[s_t]["count"]
            # Each occurrence contributes 1 / (total path count)
            gebc[e_norm] += 1.0 / total_paths

    return gebc


def compute_gebc_from_short_paths_3_old(short_paths, edge_to_paths, all_sp_edges, graph):
    """
    Compute GEBC with the newer sp data structure, updating only the edges
    listed in all_sp_edges.

    sp structure:
      { (s,t): {"paths": [shortest paths], "count": number of paths} }
    edge_to_paths structure:
      { normalized_edge: [ ((s,t), path_index), ... ] }

    GEBC(e) = sum over (s,t) [ (occurrences of e on (s,t) paths) / (path count) ],
    halved afterwards for the undirected double-count.
    NOTE(review): if sp stores each unordered pair only once (s < t, as the
    parallel_2 pipeline produces), this halving undercounts by 2x — confirm
    which sp variant feeds this function.

    Only edges present in all_sp_edges receive a GEBC value.
    """
    # Initialize only the edges in all_sp_edges, at GEBC 0.0
    gebc = {edge: 0.0 for edge in all_sp_edges}

    # Walk edge_to_paths, handling only edges contained in all_sp_edges
    for e_norm, occurrences in edge_to_paths.items():
        if e_norm not in all_sp_edges:
            continue
        for (s_t, path_index) in occurrences:
            total_paths = short_paths[s_t]["count"]
            gebc[e_norm] += 1.0 / total_paths

    # Halve for the undirected double-count
    gebc = {edge: value / 2.0 for edge, value in gebc.items()}

    # Write the GEBC attribute back onto the graph for these edges only
    nx.set_edge_attributes(graph, gebc, "GEBC")

    return gebc


def compute_gebc_from_short_paths_3(short_paths, edge_to_paths, affected_path, all_sp_edges, graph):
    """
    Incrementally adjust the GEBC values of the edges in all_sp_edges after
    the shortest-path data has been updated.

    Intended correction (per the original design notes): for each occurrence
    ((s,t), path_index) recorded for an edge, the old contribution was
    1/(count+1) and the new one is 1/count, i.e.
    delta = 1/count - 1/(count+1); additionally, for every pair in
    affected_path, the edges of each surviving shortest path are scaled by
    (count+1)/count.

    NOTE(review): gebc is initialized to 0.0 here, so the first
    multiplicative update below evaluates to -1/new_count rather than a
    delta applied to a previously stored value — verify this matches the
    intended incremental formula before trusting the written attributes.

    Finally the values are written back to graph via nx.set_edge_attributes.
    """
    import networkx as nx

    # Initialize the GEBC of every edge in all_sp_edges to 0.0
    gebc = {edge: 0.0 for edge in all_sp_edges}

    # 1. Direct update: every (s,t) occurrence recorded in edge_to_paths
    for edge in all_sp_edges:
        if edge in edge_to_paths:
            for (s_t, path_index) in edge_to_paths[edge]:
                new_count = short_paths[s_t]["count"]  # path count after the update
                gebc[edge] = gebc[edge] * \
                    (new_count + 1) / new_count - 1 / new_count

    # 2. Indirect update: the (s,t) pairs recorded in affected_path
    for s_t in affected_path:
        if s_t in short_paths:
            new_count = short_paths[s_t]["count"]
            # Walk every surviving shortest path of this (s,t) pair
            for path in short_paths[s_t]["paths"]:
                # Scale every edge on the path
                for i in range(len(path) - 1):
                    edge = normalize_edge((path[i], path[i+1]))
                    if edge in all_sp_edges:
                        gebc[edge] *= (new_count + 1) / new_count

    # Write the GEBC attribute of the all_sp_edges back onto the graph
    nx.set_edge_attributes(graph, gebc, "GEBC")

    return gebc


# 2025-03-21

def break_PBC_graph(graph_Plus_graph_PBCx, edge_to_break):
    """2025-03-21
    Remove the edges listed in edge_to_break from the periodic (PBC-extended)
    graph, in place.

    Each edge in edge_to_break comes from the real PBC network and is already
    oriented so that the node with the smaller x coordinate comes first.
    Boundary-crossing edges are remapped through the graph attribute
    'mapping' before removal; ordinary edges are removed both in the original
    and in the mirrored copy, checking existence instead of tracking subgraph
    membership.
    """
    _, new_subgraph_node = retain_largest_subgraph(graph_Plus_graph_PBCx)

    # Record the nodes newly absorbed into the largest connected subgraph.
    graph_Plus_graph_PBCx.graph['node_in_subgraph'].update(new_subgraph_node)

    mapping = graph_Plus_graph_PBCx.graph['mapping']
    pbc_edges = graph_Plus_graph_PBCx.graph['pbc_edges']

    for edge in edge_to_break:  # edges from the PBC graph: the network that really breaks
        edge_re = (edge[1], edge[0], edge[2])
        if edge in pbc_edges or edge_re in pbc_edges:
            # pbc_edges stores edges with the smaller-x node first, so the
            # reversed orientation should never match; flag it if it does.
            if edge_re in pbc_edges:
                print('error')
            # A PBC-crossing edge lives between edge[1] and the periodic
            # image of edge[0].
            if graph_Plus_graph_PBCx.has_edge(edge[1], mapping[edge[0]], edge[2]):
                graph_Plus_graph_PBCx.remove_edge(
                    edge[1], mapping[edge[0]], edge[2])
        else:
            # Ordinary edge: remove the mirrored copy and the original,
            # guarding each removal with an existence check (2025-03-21).
            if graph_Plus_graph_PBCx.has_edge(mapping[edge[0]], mapping[edge[1]], edge[2]):
                graph_Plus_graph_PBCx.remove_edge(
                    mapping[edge[0]], mapping[edge[1]], edge[2])
            if graph_Plus_graph_PBCx.has_edge(edge[0], edge[1], edge[2]):
                graph_Plus_graph_PBCx.remove_edge(edge[0], edge[1], edge[2])


def largest_graph_connectivity(full_graph):
    """Return True while the periodic network is still macroscopically connected.

    Extracts the largest connected subgraph of full_graph (updating the graph
    attribute 'node_in_subgraph' with the newly absorbed nodes) and then
    checks whether any node of that subgraph still has its periodic partner
    (via the 'mapping' attribute) outside 'node_in_subgraph'. If no such node
    exists, the network is considered broken and False is returned.
    """
    # retain_largest_subgraph returns the largest connected subgraph and the
    # set of nodes newly added to it.
    graph, new_subgraph_node = retain_largest_subgraph(full_graph)

    # Accumulate the newly absorbed nodes on the full graph.
    full_graph.graph['node_in_subgraph'].update(new_subgraph_node)

    mapping = full_graph.graph['mapping']
    node_in_subgraph = full_graph.graph['node_in_subgraph']

    # Connected iff at least one node's periodic partner is still outside the
    # absorbed node set. any() stops at the first hit instead of building the
    # full candidate list as before.
    return any(mapping[node] not in node_in_subgraph for node in graph.nodes())


def detect_position_key(graph):
    """Return the node-attribute key holding coordinates ('position' or
    'posi'), checked in that order; None if no node carries either key."""
    for _node, attrs in graph.nodes(data=True):
        for candidate in ("position", "posi"):
            if candidate in attrs:
                return candidate
    return None


def break_graph(graph_inital, graph_PBC_x,
                GEBC_mode='GEBC',  # or 'GEBC_m1'
                ever_break_num=10):
    """Iteratively break network edges, sampled with probability proportional
    to their GEBC value, until the periodic network loses macroscopic
    connectivity.

    Parameters
    ----------
    graph_inital : networkx multigraph
        Initial network; copied each iteration, never mutated.
    graph_PBC_x : networkx multigraph
        Periodic (PBC-extended) companion graph; removals are mirrored into
        it and connectivity is tested on it.
    GEBC_mode : str
        Edge attribute used as breaking weight: 'GEBC' or 'GEBC_m1'.
    ever_break_num : int
        Initial batch size; NOTE it is recomputed every iteration as 1% of
        the remaining edges (floored at 1).

    Returns
    -------
    dict : step index -> {"graph", "broken_num", "GEBC_sum"}. On an unknown
    GEBC_mode the series accumulated so far is returned (previously this
    path raised NameError because graph_series was undefined).
    """
    broken_nums = [0]
    GEBC_sum_s = [0]  # running total of the GEBC of removed edges
    graphs = [graph_inital]
    initial_edges_num = len(graph_inital.edges)

    while True:
        graph = graphs[-1].copy()

        # Recompute the edge weights on the current network state.
        start_time = time.time()
        if GEBC_mode == 'GEBC':
            nm.GEBC_write_save(graph)
        elif GEBC_mode == 'GEBC_m1':
            nm.GEBC_m1_write_save(graph)
        else:
            print('error: GEBC_mode is not defined')
            break  # fall through and return the partial series
        end_time = time.time()
        print(f"GEBC计算用时: {end_time - start_time:.4f} 秒", end=',')

        # Map every edge, in both node orders (removal may flip orientation),
        # to its GEBC weight.
        gebc_dict = {}
        for node1, node2, key, data in graph.edges(data=True, keys=True):
            gebc_value = data.get(GEBC_mode, 0)  # default 0 if attribute missing
            gebc_dict[(node1, node2, key)] = gebc_value
            gebc_dict[(node2, node1, key)] = gebc_value

        # Unique edge list with nodes in ascending order.
        edges = list({(min(node1, node2), max(node1, node2), key)
                      for node1, node2, key in graph.edges(keys=True)})

        # Removal probability proportional to GEBC; uniform fallback when all
        # weights are zero.
        edge_gebc_values = [gebc_dict[edge] for edge in edges]
        total_gebc = sum(edge_gebc_values)
        if total_gebc == 0:
            probabilities = ([1 / len(edge_gebc_values)] * len(edge_gebc_values)
                            if edge_gebc_values else [])
        else:
            probabilities = [gebc / total_gebc for gebc in edge_gebc_values]

        # Break 1% of the remaining edges per iteration; floor at 1 so small
        # networks (<100 edges) cannot stall the loop with k=0 draws.
        ever_break_num = max(1, len(graph.edges) // 100)
        print(f'网络edge: {len(graph.edges)}, ever_break_num={ever_break_num}', end=', ')
        print(f'断裂率: {1 - len(graph.edges)/initial_edges_num :.4f}')
        edges_to_remove_unsorted = random.choices(
            edges, weights=probabilities, k=ever_break_num)

        # Orient each chosen edge so the smaller-x node comes first, as
        # break_PBC_graph expects.
        position_key = detect_position_key(graph)
        edges_to_remove = []
        for node1, node2, key in edges_to_remove_unsorted:
            pos1 = graph.nodes[node1].get(position_key, [0])[0]
            pos2 = graph.nodes[node2].get(position_key, [0])[0]
            if pos1 > pos2:
                edges_to_remove.append((node2, node1, key))
            else:
                edges_to_remove.append((node1, node2, key))

        # Remove the selected edges, accumulating their GEBC.
        ever_GEBC_sum = 0
        for edge in edges_to_remove:
            # Both orientations are stored, so a direct lookup works.
            ever_GEBC_sum += gebc_dict.get(edge, 0)
            if graph.has_edge(*edge):
                graph.remove_edge(*edge)

        # Mirror the removals into the periodic companion graph.
        break_PBC_graph(graph_PBC_x, edges_to_remove)

        # Record this step of the breaking series.
        graphs.append(graph.copy())
        broken_nums.append(broken_nums[-1] + ever_break_num)
        GEBC_sum_s.append(GEBC_sum_s[-1] + ever_GEBC_sum)

        # Stop once the periodic network is no longer macroscopically connected.
        if not largest_graph_connectivity(graph_PBC_x):
            break

    # Build the series after the loop so every exit path (including the
    # unknown-GEBC_mode break) returns a well-defined result.
    graph_series = {
        i: {
            "graph": graphs[i],
            "broken_num": broken_nums[i],
            "GEBC_sum": GEBC_sum_s[i]
        } for i in range(len(graphs))
    }
    return graph_series

# 2025-04-10


def process_GEBC_break(graph, ever_break_num=1, parallel_id='',
                       note='',
                       save_path='./',
                       GEBC_mode='GEBC',
                       graph_mode=''):
    """Run one GEBC-driven breaking simulation, pickle the resulting series,
    and return summary statistics of the run.

    See declares.note_save_path for the save-path naming convention.
    """
    edge_count_before = len(graph.edges)
    graph_PBC_x = PBCx_graph(graph, graph_mode=graph_mode)

    # Time the breaking loop itself.
    t_begin = time.time()
    graph_series = break_graph(
        graph, graph_PBC_x, GEBC_mode=GEBC_mode, ever_break_num=ever_break_num)
    t_finish = time.time()

    # Persist the whole series next to the data, named after `note`.
    pkl_file_path = os.path.join(save_path, f"{note}.pkl")
    with open(pkl_file_path, "wb") as pkl_file:
        pickle.dump(graph_series, pkl_file)

    # Fraction of edges broken over the run.
    broken_rate = len(graph_series) * ever_break_num / edge_count_before
    return {
        "broken_rate": round(broken_rate, 4),
        "time_cost_seconds": round(t_finish - t_begin, 3),
        "current_time": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    }

# 2025-04-10 判断是否形成宏观联通网络


def croselink_degree(map_path, detail=False):
    # 2025-03-25 Check whether a macroscopically connected network has formed:
    # returns the node fraction of the largest connected component and the
    # sizes of the five largest components.
    graph = nm.map_file_to_networkx(map_path)

    # One pass over the components: sizes in descending order.
    component_sizes = sorted(
        (len(c) for c in nx.connected_components(graph)), reverse=True)

    proportion = component_sizes[0] / len(graph.nodes)
    subgraph_sizes = component_sizes[:5]

    if detail:
        print(
            f"Proportion of nodes in the largest connected component: {proportion:.3f}")
        for i, size in enumerate(subgraph_sizes, start=1):
            print(f"第{i}大子图节点数:{size}")

    return proportion, subgraph_sizes


def strip_z(pos_dict):
    """Project 3D node positions onto the x-y plane, dropping the z value."""
    projected = {}
    for node, coords in pos_dict.items():
        projected[node] = (coords[0], coords[1])
    return projected

def draw_graph(graph, save_path):
    """Draw the graph projected onto the x-y plane and save it as
    graph_pic.png (300 dpi) under save_path."""
    pos = nx.get_node_attributes(graph, 'position')
    nx.draw(graph, strip_z(pos), node_size=20, with_labels=False)
    # Plain string: the original f-string had no placeholders.
    plt.savefig(os.path.join(save_path, "graph_pic.png"), dpi=300)
    # Close the figure so repeated calls neither accumulate open figures
    # nor draw on top of the previous plot.
    plt.close()


