from collections import deque
from collections import defaultdict
from MD_moment import calculate_correlation
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import MDAnalysis as mda
import os
from collections import Counter
import ast

import GraphTools.graph_method as gm
import GraphTools.GEBC_break as Gb
import GraphTools.basic_method as bm


def subtract_one_from_node_ids(G):
    """Return a copy of multigraph G with every node ID decremented by 1.

    Edge attribute 'atomlist' entries (when stored as lists) are shifted
    down by 1 as well, so they stay consistent with the new node IDs.

    Parameters:
    G (nx.MultiGraph): the original multigraph; it is left unmodified.

    Returns:
    nx.MultiGraph: a relabeled copy with adjusted 'atomlist' edge attributes.
    """
    # Relabel into a fresh graph (copy=True) so the caller's graph is untouched.
    shifted = nx.relabel_nodes(G, {n: n - 1 for n in G.nodes()}, copy=True)

    # Shift every atom index stored on the edges to match the new node IDs.
    for _, _, _, attrs in shifted.edges(keys=True, data=True):
        atoms = attrs.get('atomlist')
        if isinstance(atoms, list):
            attrs['atomlist'] = [a - 1 for a in atoms]

    return shifted

# def analyze_multigraph(network_graph, full_dir_path):
#     # Initialize stats
#     total_nodes = network_graph.number_of_nodes()
#     total_edges = network_graph.number_of_edges()

#     # Degree info
#     degree_counts = {0: 0, 1: 0, 2: 0}
#     nodes_with_degree = {0: [], 1: [], 2: []}
#     degree_higher_than_2 = {}

#     nodes_with_two_edges_and_common_neighbor = []
#     loops = []

#     for node in network_graph.nodes():
#         degree = network_graph.degree(node)
#         if degree <= 2:
#             degree_counts[degree] += 1
#             nodes_with_degree[degree].append(node)

#             if degree == 2:
#                 # Check if two edges connect to the same neighbor
#                 neighbors = list(network_graph.neighbors(node))
#                 if len(neighbors) == 2 and neighbors[0] == neighbors[1]:
#                     nodes_with_two_edges_and_common_neighbor.append(node)
#         else:
#             if degree not in degree_higher_than_2:
#                 degree_higher_than_2[degree] = []
#             degree_higher_than_2[degree].append(node)

#         # Check for self-loops (loops)
#         if network_graph.has_edge(node, node):
#             loops.append(node)

#     # Compute proportions for 0, 1, 2 edges
#     isolated_proportion = degree_counts[0] / total_nodes if total_nodes > 0 else 0
#     one_edge_proportion = degree_counts[1] / total_nodes if total_nodes > 0 else 0
#     two_edge_proportion = degree_counts[2] / total_nodes if total_nodes > 0 else 0

#     # Compute proportions for nodes with more than 2 edges
#     degree_higher_proportions = {}
#     for degree, nodes in degree_higher_than_2.items():
#         degree_higher_proportions[degree] = len(nodes) / total_nodes if total_nodes > 0 else 0

#     # Number of connected components and their sizes
#     connected_components = list(nx.connected_components(network_graph.to_undirected()))
#     component_sizes = [len(component) for component in connected_components]

#     # Count how many components have a specific number of nodes
#     component_size_counts = Counter(component_sizes)

#     # Number of components
#     num_components = len(connected_components)

#     # Find the largest component size
#     largest_component_size = max(component_sizes)

#     # Calculate the gelation rate (largest component size / total nodes)
#     gelation_rate = largest_component_size / total_nodes if total_nodes > 0 else 0

#     # Loop statistics
#     total_loops = len(loops)
#     loop_proportion = total_loops / total_edges if total_edges > 0 else 0

#     # Write to file
#     save_path =os.path.join(full_dir_path, 'analysis')
#     os.makedirs(save_path, exist_ok=True)
#     output_file_path = os.path.join(save_path, "network_statistics.txt")
#     with open(output_file_path, 'w') as f:
#         # Part 1: Summary
#         f.write("Network Summary:\n")
#         f.write(f"1. Total nodes: {total_nodes}, Total edges: {total_edges}\n")
#         f.write(f"2. Nodes with 0 edges: {degree_counts[0]} ({isolated_proportion:.2%}), "
#                 f"1 edge: {degree_counts[1]} ({one_edge_proportion:.2%}), "
#                 f"2 edges: {degree_counts[2]} ({two_edge_proportion:.2%})\n")

#         # Write detailed stats for nodes with more than 2 edges
#         for degree, count in degree_higher_than_2.items():
#             f.write(f"Nodes with {degree} edges: {len(count)} ({degree_higher_proportions[degree]:.2%})\n")

#         f.write(f"3. Nodes with 2 edges and common neighbor: {len(nodes_with_two_edges_and_common_neighbor)} "
#                 f"({len(nodes_with_two_edges_and_common_neighbor)/total_nodes:.2%}), "
#                 f"Loop count: {total_loops} ({loop_proportion:.2%} of all edges)\n")

#         f.write(f"4. Number of connected components: {num_components}\n\n")

#         # Write stats about connected components
#         f.write("Connected Components Summary:\n")
#         for size, count in component_size_counts.items():
#             f.write(f"Number of components with {size} nodes: {count}\n")

#         # Write the gelation rate
#         f.write(f"\n5. Gelation rate (largest component size / total nodes): {gelation_rate:.2%}\n")

#         # Part 2: Node Lists
#         f.write("\nNode Lists:\n")
#         f.write("Nodes with 0 edges:\n")
#         f.write(f"{nodes_with_degree[0]}\n\n")
#         f.write("Nodes with 1 edge:\n")
#         f.write(f"{nodes_with_degree[1]}\n\n")
#         f.write("Nodes with 2 edges:\n")
#         f.write(f"{nodes_with_degree[2]}\n\n")

#         # Write node lists for nodes with more than 2 edges
#         for degree, nodes in degree_higher_than_2.items():
#             f.write(f"Nodes with {degree} edges:\n")
#             f.write(f"{nodes}\n\n")

#         f.write("Nodes with 2 edges and a common neighbor:\n")
#         f.write(f"{nodes_with_two_edges_and_common_neighbor}\n")

#         # Write the loops
#         f.write("\nLoops (self-connecting nodes):\n")
#         f.write(f"{loops}\n")

#     return output_file_path

# def GEBC_write_save(network_graph,full_dir_path):
#     # 计算每条边的GEBC (Generalized Edge Betweenness Centrality)
#     edge_betweenness = nx.edge_betweenness_centrality(network_graph)

#     # 将GEBC值存储在边的属性中
#     for edge, betweenness in edge_betweenness.items():
#         network_graph[edge[0]][edge[1]][0]['GEBC'] = betweenness  # 存储到边的属性中

#     # 保存网络图到指定路径,使用GML格式
#     GML_path=os.path.join(full_dir_path,'graph.gml')
#     nx.write_gml(network_graph, GML_path)


def GEBC_hist(network_graph, full_dir_path='.', note='', modeflag='GEBC'):
    """Plot and save a histogram of an edge-centrality attribute.

    Edge attributes available on this graph: chain_id, chain_len, atomlist,
    bondlist, GEBC.

    Parameters:
    - network_graph: graph whose edges may carry the `modeflag` attribute
    - full_dir_path: base directory; the plot is saved under '<dir>/analysis'
    - note: extra tag inserted into the output file name
    - modeflag: name of the edge attribute to histogram (default 'GEBC')

    Returns:
    - n, bins: histogram counts and bin edges as produced by plt.hist
    """
    # Collect the attribute value from every edge that carries it.
    betweenness_values = [data[modeflag]
                          for _, _, data in network_graph.edges(data=True)
                          if modeflag in data]

    # Draw the betweenness distribution histogram.
    plt.figure(figsize=(8, 6))
    n, bins, patches = plt.hist(
        betweenness_values, bins=100, color='skyblue', edgecolor='black')
    plt.title('Edge Betweenness Centrality Distribution')
    plt.xlabel('Betweenness Centrality')
    plt.ylabel('Frequency')
    plt.grid(True)

    save_path = os.path.join(full_dir_path, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    betweenness_hist_path = os.path.join(
        save_path, f'{modeflag}_{note}_hist.png')
    plt.savefig(betweenness_hist_path)
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close()
    return n, bins

# def GEBC_wt_write_save(network_graph,network_mission_dir):

#     # 计算每条边的GEBC (Generalized Edge Betweenness Centrality)
#     edge_betweenness = nx.edge_betweenness_centrality(network_graph)

#     # 将GEBC值存储在边的属性中
#     for edge, betweenness in edge_betweenness.items():
#         network_graph[edge[0]][edge[1]][0]['GEBC'] = betweenness  # 存储到边的属性中

#     # 保存网络图到指定路径,使用GML格式
#     GML_path=os.path.join(full_dir_path,'graph.gml')
#     nx.write_gml(network_graph, GML_path)
# def GEBC_wt_hist(network_graph,network_mission_dir) :


def minimum_image_distance(pos1, pos2, box):
    """Shortest distance between two points under periodic boundary conditions.

    pos1 and pos2 are 3-component atom coordinates; box holds the simulation
    box lengths along x, y, z.
    """
    delta = pos1 - pos2
    # Fold each component back into [-box/2, box/2] (minimum image convention).
    for axis in range(3):
        component = delta[axis]
        if abs(component) > box[axis] / 2:
            delta[axis] = component - np.sign(component) * box[axis]
    return np.linalg.norm(delta)


def load_dict(file_path):
    """Load a bond -> strain mapping from a text file.

    Each line has the form "(atom1, atom2): strain", as written by
    compute_bond_strain. Blank lines are skipped.

    Parameters:
    - file_path: path of the text file to parse

    Returns:
    - dict mapping (atom1, atom2) tuples to float strain values
    """
    bond_strain_dict = {}
    with open(file_path, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate empty / trailing lines

            # Split only on the first ':' so the value part stays intact.
            bond_str, strain_str = line.split(":", 1)

            # literal_eval safely parses "(atom1, atom2)" into a tuple;
            # unlike eval it cannot execute arbitrary code from the file.
            bond = ast.literal_eval(bond_str.strip())
            strain = float(strain_str.strip())

            bond_strain_dict[bond] = strain

    return bond_strain_dict


def analyze_multigraph(network_graph, full_dir_path):
    """Compute degree, component and self-loop statistics for a (multi)graph
    and write them to '<full_dir_path>/network_statistics.txt'.

    Parameters:
    - network_graph: the (multi)graph to analyze
    - full_dir_path: directory the statistics file is written into

    Returns:
    - path of the written statistics file
    """
    total_nodes = network_graph.number_of_nodes()
    total_edges = network_graph.number_of_edges()

    # Degree bookkeeping
    degree_counts = {0: 0, 1: 0, 2: 0}
    nodes_with_degree = {0: [], 1: [], 2: []}
    degree_higher_than_2 = {}

    nodes_with_two_edges_and_common_neighbor = []
    loops = []

    for node in network_graph.nodes():
        degree = network_graph.degree(node)
        if degree <= 2:
            degree_counts[degree] += 1
            nodes_with_degree[degree].append(node)

            if degree == 2:
                # In a MultiGraph, neighbors() yields each neighbor once, so a
                # node joined to a single partner by two parallel edges shows
                # up as ONE (non-self) neighbor with degree 2. The previous
                # check (len == 2 and neighbors[0] == neighbors[1]) could
                # never be true and silently found nothing.
                neighbors = list(network_graph.neighbors(node))
                if len(neighbors) == 1 and neighbors[0] != node:
                    nodes_with_two_edges_and_common_neighbor.append(node)
        else:
            degree_higher_than_2.setdefault(degree, []).append(node)

        # Check for self-loops (loops)
        if network_graph.has_edge(node, node):
            loops.append(node)

    # Proportions for 0, 1, 2 edges, guarded against an empty graph.
    isolated_proportion = degree_counts[0] / \
        total_nodes if total_nodes > 0 else 0
    one_edge_proportion = degree_counts[1] / \
        total_nodes if total_nodes > 0 else 0
    two_edge_proportion = degree_counts[2] / \
        total_nodes if total_nodes > 0 else 0

    # Proportions for nodes with more than 2 edges.
    degree_higher_proportions = {
        degree: (len(nodes) / total_nodes if total_nodes > 0 else 0)
        for degree, nodes in degree_higher_than_2.items()}

    # Connected components and their sizes.
    connected_components = list(
        nx.connected_components(network_graph.to_undirected()))
    component_sizes = [len(component) for component in connected_components]

    # Count how many components have a specific number of nodes.
    component_size_counts = Counter(component_sizes)
    num_components = len(connected_components)

    # default=0 keeps an empty graph from raising on max().
    largest_component_size = max(component_sizes, default=0)

    # Gelation rate: largest component size / total nodes.
    gelation_rate = largest_component_size / total_nodes if total_nodes > 0 else 0

    # Loop statistics.
    total_loops = len(loops)
    loop_proportion = total_loops / total_edges if total_edges > 0 else 0

    # Precomputed so the report line below cannot divide by zero.
    common_neighbor_count = len(nodes_with_two_edges_and_common_neighbor)
    common_neighbor_prop = (common_neighbor_count / total_nodes
                            if total_nodes > 0 else 0)

    # Write the report.
    output_file_path = os.path.join(full_dir_path, "network_statistics.txt")
    with open(output_file_path, 'w') as f:
        # Part 1: Summary
        f.write("Network Summary:\n")
        f.write(f"1. Total nodes: {total_nodes}, Total edges: {total_edges}\n")
        f.write(f"2. Nodes with 0 edges: {degree_counts[0]} ({isolated_proportion:.2%}), "
                f"1 edge: {degree_counts[1]} ({one_edge_proportion:.2%}), "
                f"2 edges: {degree_counts[2]} ({two_edge_proportion:.2%})\n")

        # Detailed stats for nodes with more than 2 edges.
        for degree, count in degree_higher_than_2.items():
            f.write(
                f"Nodes with {degree} edges: {len(count)} ({degree_higher_proportions[degree]:.2%})\n")

        f.write(f"3. Nodes with 2 edges and common neighbor: {common_neighbor_count} "
                f"({common_neighbor_prop:.2%}), "
                f"Loop count: {total_loops} ({loop_proportion:.2%} of all edges)\n")

        f.write(f"4. Number of connected components: {num_components}\n\n")

        # Stats about connected components.
        f.write("Connected Components Summary:\n")
        for size, count in component_size_counts.items():
            f.write(f"Number of components with {size} nodes: {count}\n")

        # Gelation rate.
        f.write(
            f"\n5. Gelation rate (largest component size / total nodes): {gelation_rate:.2%}\n")

        # Part 2: Node Lists
        f.write("\nNode Lists:\n")
        f.write("Nodes with 0 edges:\n")
        f.write(f"{nodes_with_degree[0]}\n\n")
        f.write("Nodes with 1 edge:\n")
        f.write(f"{nodes_with_degree[1]}\n\n")
        f.write("Nodes with 2 edges:\n")
        f.write(f"{nodes_with_degree[2]}\n\n")

        # Node lists for nodes with more than 2 edges.
        for degree, nodes in degree_higher_than_2.items():
            f.write(f"Nodes with {degree} edges:\n")
            f.write(f"{nodes}\n\n")

        f.write("Nodes with 2 edges and a common neighbor:\n")
        f.write(f"{nodes_with_two_edges_and_common_neighbor}\n")

        # Self-loops.
        f.write("\nLoops (self-connecting nodes):\n")
        f.write(f"{loops}\n")

    return output_file_path


def GEBC_write_save(network_graph, full_dir_path=None, k_prop=None):
    '''
    Compute edge betweenness centrality (GEBC) via graph-tool and store it on
    every (parallel) edge as the 'GEBC' attribute.

    Parameters:
    - network_graph: the (multi)graph to annotate; modified in place
    - full_dir_path: when given, the annotated graph is also written to
      '<full_dir_path>/analysis/graph.gml'
    - k_prop: float in (0, 1]; when given, GEBC is approximated by sampling
      that proportion of edges instead of computing on the whole graph

    Returns:
    - the annotated graph
    '''
    # Delegate the actual computation to the graph-tool backend. (The old
    # nx.edge_betweenness_centrality calls are superseded; the previously
    # computed-but-unused `k` and seeded Random were dead code and removed.)
    if k_prop is not None:
        edge_betweenness = edge_betweenness_centrality_by_graph_tool(
            network_graph, k_prop, normalized=True)
    else:
        edge_betweenness = edge_betweenness_centrality_by_graph_tool(
            network_graph, normalized=True)

    # Store the GEBC value on every parallel edge between the two endpoints.
    for edge, betweenness in edge_betweenness.items():
        for key in network_graph[edge[0]][edge[1]]:
            network_graph[edge[0]][edge[1]][key]['GEBC'] = betweenness

    # Optionally persist the annotated graph in GML format.
    if full_dir_path:
        save_path = os.path.join(full_dir_path, 'analysis')
        os.makedirs(save_path, exist_ok=True)
        GML_path = os.path.join(save_path, 'graph.gml')
        nx.write_gml(network_graph, GML_path)

    return network_graph


def GEBC_m1_write_save(network_graph, full_dir_path=None, k_prop=None,
                       parallel_flag=False):
    """Compute 'm1'-mode weighted edge betweenness centrality and store it on
    every (parallel) edge as the 'GEBC_m1' attribute.

    Relies on a customized networkx build: both centrality entry points are
    called with a non-standard `GEBC_weight` keyword, and
    `nx.parallel_edge_betweenness_centrality` is not part of stock networkx.

    Parameters:
    - network_graph: graph whose nodes carry a position attribute and whose
      graph dict has a 'box' entry
    - full_dir_path: when given, the annotated graph is written to
      '<full_dir_path>/analysis/graph.gml'
    - k_prop: when not None, forwarded as `k` for sampled approximation.
      NOTE(review): unlike GEBC_write_save this is passed as-is rather than
      scaled by the node count — confirm the intended units.
    - parallel_flag: use the parallel centrality implementation when True

    Returns:
    - the annotated graph
    """
    # Build the GEBC_weight dict consumed by the customized centrality code.
    GEBC_weight = {}

    # Node-position dict, keyed on whatever position attribute the graph uses.
    key_name = Gb.detect_position_key(network_graph)
    position = {node: data[key_name] for node, data in network_graph.nodes(
        data=True) if key_name in data}
    GEBC_weight['position'] = position

    GEBC_weight['box'] = bm.get_box_dimensions(network_graph.graph['box'])
    GEBC_weight['mode'] = 'm1'  # mode 'm1' only covers the cosine case

    if parallel_flag:
        GEBC_function = nx.parallel_edge_betweenness_centrality
    else:
        GEBC_function = nx.edge_betweenness_centrality
    # k_prop decides whether to use sampled approximation.
    if k_prop is not None:
        import random
        random_state = random.Random(123)  # fixed seed for reproducibility
        edge_betweenness = GEBC_function(
            network_graph,
            k=k_prop,
            GEBC_weight=GEBC_weight,
            normalized=True,
            seed=random_state
        )
    else:
        edge_betweenness = GEBC_function(
            network_graph,
            GEBC_weight=GEBC_weight,
            normalized=True
        )

    # Store GEBC_m1 on every parallel edge between the two endpoints.
    for edge, betweenness in edge_betweenness.items():
        for key in network_graph[edge[0]][edge[1]]:
            network_graph[edge[0]][edge[1]][key]['GEBC_m1'] = betweenness

    # Save the annotated graph to the given path in GML format.
    if full_dir_path:
        save_path = os.path.join(full_dir_path, 'analysis')
        os.makedirs(save_path, exist_ok=True)
        GML_path = os.path.join(save_path, 'graph.gml')
        nx.write_gml(network_graph, GML_path)

    return network_graph


def edge_betweenness_centrality_by_graph_tool(G, k=None, normalized=True):
    """
    Compute edge betweenness centrality (GEBC) for a NetworkX graph G using
    graph-tool, and return the values as a dict keyed by edge tuples.
    (The write-back to edge attributes is left to the caller; the old claim
    that this function stores 'GEBC_v1' on the edges was outdated.)

    Parameters:
        G : networkx.Graph or networkx.DiGraph
        k : float between 0 and 1 — the proportion of edges to sample for an
            approximate computation; if None, the whole graph is used.
        normalized : bool — whether to normalize the centrality values.

    Returns:
        dict of the form {(u, v): GEBC, ...}. When k is given, only the
        sampled edges appear in the result.

    NOTE(review): parallel edges of a MultiGraph are added individually to
    the graph-tool graph, but the returned dict is keyed on (u, v) only, so
    parallel edges collapse to a single entry — confirm this is intended.
    """
    import random
    from graph_tool.all import Graph, betweenness
    import numpy as np

    # Build a graph-tool graph with the same directedness as G.
    gt_graph = Graph(directed=G.is_directed())

    # Node mapping between NetworkX and graph-tool, in both directions.
    nx_to_gt = {}
    gt_to_nx = {}
    for node in G.nodes():
        vertex = gt_graph.add_vertex()
        nx_to_gt[node] = vertex
        gt_to_nx[vertex] = node

    # Copy every edge from NetworkX into the graph-tool graph.
    for u, v in G.edges():
        gt_graph.add_edge(nx_to_gt[u], nx_to_gt[v])
    all_edges = list(gt_graph.edges())

    # Decide whether to sample edges based on k.
    if k is None:
        # Full-graph computation; no pivots needed.
        _, edge_centrality = betweenness(gt_graph, norm=normalized)
        selected_edges = all_edges
    else:
        # Sample a k-proportion of edges and use their endpoints as pivots.
        num_select = int(k * len(all_edges))
        selected_edges = random.sample(all_edges, num_select)
        sampled_node_ids = {int(edge.source()) for edge in selected_edges} | \
                           {int(edge.target()) for edge in selected_edges}
        pivots = np.array(list(sampled_node_ids), dtype=np.uint64)
        _, edge_centrality = betweenness(
            gt_graph, pivots=pivots, norm=normalized)

    # Map the computed GEBC values back to NetworkX edge tuples.
    edges_dict = {}
    for edge in selected_edges:
        u_node = gt_to_nx[edge.source()]
        v_node = gt_to_nx[edge.target()]
        # Determine how this edge is represented in G (directed or not).
        if G.has_edge(u_node, v_node):
            key = (u_node, v_node)
        elif G.has_edge(v_node, u_node):
            key = (v_node, u_node)
        else:
            continue  # skip edges that do not exist in G
        centrality_val = edge_centrality[edge]
        # G[key[0]][key[1]]['GEBC'] = centrality_val
        edges_dict[key] = centrality_val

    return edges_dict


# def GEBC_hist(network_graph, full_dir_path):
#     # 边的属性: chain_id chain_len atomlist bondlist GEBC
#     betweenness_values = []
#     # Iterate through the edges and extract 'GEBC' attribute
#     for u, v, data in network_graph.edges(data=True):
#         if 'GEBC' in data:
#             betweenness_values.append(data['GEBC'])
#     # 绘制 betweenness 分布直方图
#     plt.figure(figsize=(8, 6))
#     n, bins, patches = plt.hist(betweenness_values, bins=100, color='skyblue', edgecolor='black')
#     plt.title('Edge Betweenness Centrality Distribution')
#     plt.xlabel('Betweenness Centrality')
#     plt.ylabel('Frequency')
#     plt.grid(True)
#     betweenness_hist_path=os.path.join(full_dir_path,'betweenness_hist.png')
#     plt.savefig(betweenness_hist_path)
#     return n, bins

def graph_plot(network_graph, full_dir_path):
    """Draw the network with node labels and per-edge GEBC labels, then save
    the figure as '<full_dir_path>/graph.png'.

    Parameters:
    - network_graph: graph to draw; edges may carry a 'GEBC' attribute
    - full_dir_path: directory the PNG is written into
    """
    # kamada_kawai_layout is too slow for large networks; spring_layout scales.
    pos = nx.spring_layout(network_graph)
    plt.figure(figsize=(10, 8))  # larger canvas keeps the drawing sparse
    nx.draw(network_graph, pos, with_labels=True, node_size=500,
            node_color='skyblue', edge_color='gray', font_size=10)
    # Label only the edges that actually carry a GEBC value.
    edge_labels = {(u, v): f"GEBC={d['GEBC']:.4f}" for u, v, d in network_graph.edges(
        data=True) if 'GEBC' in d}
    nx.draw_networkx_edge_labels(network_graph, pos, edge_labels=edge_labels)

    # Save and close so repeated calls do not leak open figures.
    img_path = os.path.join(full_dir_path, 'graph.png')
    plt.savefig(img_path)
    plt.close()


def graph_position_write(G, network_mission_dir,
                         initial_positions_dict, final_positions_dict,
                         box_initial, box_final):
    """
    Attach initial and final normalized positions to each node (node IDs are
    0-based atom indices), record both box sizes as graph attributes, and
    save the updated graph to '<network_mission_dir>/analysis/graph.gml'.

    Parameters:
    - G: the graph to annotate (modified in place)
    - network_mission_dir: output base directory; results go to 'analysis/'
    - initial_positions_dict: atom ID (1-based) -> initial normalized position
    - final_positions_dict: atom ID (1-based) -> final normalized position
    - box_final / box_initial: box dimensions (3-component numpy arrays)
    """

    # Node IDs are 0-based atom indices while the position dicts are keyed by
    # 1-based atom IDs, hence the +1 offset on every lookup.
    for node_id in G.nodes:
        node_id_int = int(node_id)
        if node_id_int + 1 in initial_positions_dict and node_id_int + 1 in final_positions_dict:
            # Store both positions as plain lists so they serialize to GML.
            G.nodes[node_id]['initial_position'] = initial_positions_dict[node_id_int+1].tolist()
            G.nodes[node_id]['final_position'] = final_positions_dict[node_id_int+1].tolist()
        else:
            print(f"节点 {node_id} 超出索引范围.")

    # Box sizes are saved as strings so they survive the GML round-trip.
    G.graph['box_initial'] = str(box_initial.tolist())
    G.graph['box_final'] = str(box_final.tolist())

    # Save the updated network under the analysis directory.
    save_path = os.path.join(network_mission_dir, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    nx.write_gml(G, os.path.join(save_path, 'graph.gml'))


def normalize_coordinates_init_state(md_state, atom_type=None):
    """Normalize atom coordinates of an MD state into fractional box units.

    Parameters:
    - md_state: object exposing `dimensions` (6 values; the first three are
      treated as lower bounds and the last three as upper bounds —
      NOTE(review): MDAnalysis usually stores lengths+angles here, confirm)
      and an `atoms` group with `ids` / `positions`.
    - atom_type: optional selection string; when given, only matching atoms
      are processed.

    Returns:
    - (box_len, normalized_positions_dict): box edge lengths and a dict
      mapping atom ID -> normalized coordinates.
    """
    # Box bounds and resulting edge lengths (Lx, Ly, Lz).
    lower_bounds = md_state.dimensions[:3]
    upper_bounds = md_state.dimensions[3:]
    box_len = upper_bounds - lower_bounds

    # Choose the atom group: everything, or only the selected type.
    if atom_type is None:
        group = md_state.atoms
    else:
        group = md_state.atoms.select_atoms(atom_type)
    atom_ids = group.ids
    positions = group.positions

    # Fractional coordinates relative to the box (positions is an Nx3 array).
    normalized_positions = (positions - lower_bounds) / box_len

    # Map each atom ID onto its normalized coordinate row.
    normalized_positions_dict = dict(zip(atom_ids, normalized_positions))

    return box_len, normalized_positions_dict


def compute_bond_strain(initial_positions, final_positions, bonds, box_initial, box_final, network_mission_dir):
    """Compute the strain of every bond under periodic boundary conditions,
    using separate box sizes for the initial and final configurations.

    Results are written to '<dir>/analysis/edge_strain_dict.txt' — one
    "(atom1, atom2): strain" line per bond (reload with nm.load_dict) — and
    returned as a dict keyed by the bond.
    """
    bond_strain_dict = {}
    for bond in bonds:
        atom1, atom2 = bond

        # Minimum-image bond length in the initial configuration.
        length_before = minimum_image_distance(
            initial_positions[atom1], initial_positions[atom2], box_initial)

        # Minimum-image bond length in the final configuration.
        length_after = minimum_image_distance(
            final_positions[atom1], final_positions[atom2], box_final)

        # Relative elongation of the bond.
        bond_strain_dict[bond] = (length_after - length_before) / length_before

    save_path = os.path.join(network_mission_dir, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    with open(os.path.join(save_path, 'edge_strain_dict.txt'), 'w') as f:
        for bond, strain in bond_strain_dict.items():
            f.write(f"{bond}: {strain}\n")
    return bond_strain_dict


def _save_strain_hist(strains, total_strain_ratio, save_dir, x_right, filename):
    """Plot one strain histogram capped at `x_right` on the x axis and save
    it under `save_dir` as `filename`."""
    plt.figure(figsize=(8, 6))
    plt.hist(strains, bins=200, alpha=0.7, label="edge strains")
    plt.xlim(right=x_right)
    plt.axvline(total_strain_ratio, color='r', linestyle='--',
                label=f"Total strain ratio: {total_strain_ratio:.2f}")
    plt.xlabel("Strain")
    plt.ylabel("Frequency")
    plt.legend()
    plt.title("edge Strain Distribution")
    plt.savefig(os.path.join(save_dir, filename))
    plt.close()


def plot_strain_histogram(network, network_mission_dir="."):
    """
    Plot per-edge strain histograms with the total strain ratio marked.

    Parameters:
    - network: graph whose nodes carry 'initial_position' / 'final_position'
      (normalized coordinates) and whose graph dict holds 'box_initial' /
      'box_final' as stringified lists
    - network_mission_dir: base directory; SVGs go to its 'analysis' subdir

    Returns:
    - dict mapping (u, v) edge tuples to their strain
    """
    # Box sizes are stored as stringified lists on the graph.
    box_initial = ast.literal_eval(network.graph['box_initial'])
    box_final = ast.literal_eval(network.graph['box_final'])

    # Overall strain ratio along x, used as the reference line.
    total_strain_ratio = (box_final[0] / box_initial[0]) - 1

    strains = []
    strains_dict = {}
    # Compute each bond's strain from the endpoint positions.
    for u, v in network.edges():
        initial_pos_u = np.array(network.nodes[u]['initial_position'])
        initial_pos_v = np.array(network.nodes[v]['initial_position'])
        final_pos_u = np.array(network.nodes[u]['final_position'])
        final_pos_v = np.array(network.nodes[v]['final_position'])

        # Physical distances: normalized deltas scaled by the box size.
        initial_distance = np.linalg.norm(
            (initial_pos_v - initial_pos_u) * box_initial)
        final_distance = np.linalg.norm(
            (final_pos_v - final_pos_u) * box_final)

        # Relative elongation; zero-length bonds contribute 0.
        strain = (final_distance - initial_distance) / \
            initial_distance if initial_distance != 0 else 0
        strains.append(strain)
        strains_dict[(u, v)] = strain

    save_path = os.path.join(network_mission_dir, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    # Same data plotted at two different x-axis caps. (The duplicated plot
    # code was folded into _save_strain_hist, which also closes each figure —
    # the second figure used to be left open.)
    _save_strain_hist(strains, total_strain_ratio, save_path, 10,
                      'bond_strain_Distribution_x=10.svg')
    _save_strain_hist(strains, total_strain_ratio, save_path, 3,
                      'bond_strain_Distribution_x=3.svg')

    return strains_dict


def plot_x_strain_histogram(network, note='', network_mission_dir="."):
    """
    Plot the histogram of per-edge strain projected on the x axis, with the
    total x strain ratio marked.

    Node positions are stored as normalized (fractional) coordinates, so the
    periodic minimum-image wrap is applied in fractional space (shift by 1.0
    when |delta| > 0.5) before scaling by the box length.

    Parameters:
    - network: graph whose nodes carry 'initial_position' / 'final_position'
      (normalized coordinates) and whose graph dict holds 'box_initial' /
      'box_final' as stringified lists
    - note: tag inserted into the output file name
    - network_mission_dir: base directory; the PNG goes to 'analysis/'

    Returns:
    - dict mapping (u, v) edge tuples to their x-direction strain
    """
    # Box sizes are stored as stringified lists on the graph.
    box_initial = ast.literal_eval(network.graph['box_initial'])
    box_final = ast.literal_eval(network.graph['box_final'])

    # Overall strain ratio along x, used as the reference line.
    total_strain_ratio = (box_final[0] / box_initial[0]) - 1

    x_strains = []
    x_strains_dict = {}
    all_point = []  # debug: spread of initial x coordinates
    for u, v in network.edges():
        initial_pos_u = np.array(network.nodes[u]['initial_position'])
        initial_pos_v = np.array(network.nodes[v]['initial_position'])
        final_pos_u = np.array(network.nodes[u]['final_position'])
        final_pos_v = np.array(network.nodes[v]['final_position'])

        all_point.append(initial_pos_u[0])

        # x components of the bond vector in fractional coordinates.
        delta_initial_x = initial_pos_v[0] - initial_pos_u[0]
        delta_final_x = final_pos_v[0] - final_pos_u[0]

        # Minimum-image wrap in fractional space: coordinates live in [0, 1),
        # so |delta| > 0.5 means the bond crosses the periodic boundary.
        # (BUGFIX: the old code compared the fractional delta against half
        # the *physical* box length, so the wrap could never trigger.)
        if delta_initial_x > 0.5:
            delta_initial_x -= 1.0
        elif delta_initial_x < -0.5:
            delta_initial_x += 1.0

        if delta_final_x > 0.5:
            delta_final_x -= 1.0
        elif delta_final_x < -0.5:
            delta_final_x += 1.0

        # Convert the wrapped fractional deltas to physical x distances.
        initial_distance_x = delta_initial_x * box_initial[0]
        final_distance_x = delta_final_x * box_final[0]

        # x-direction strain; zero-length projections contribute 0.
        strain_x = (final_distance_x - initial_distance_x) / \
            initial_distance_x if initial_distance_x != 0 else 0
        x_strains.append(strain_x)
        x_strains_dict[(u, v)] = strain_x

    # Debug output: coordinate range check (guarded for edge-less graphs).
    if all_point:
        print(max(all_point), min(all_point))

    # Draw the histogram of x-direction strains.
    plt.figure(figsize=(8, 6))
    plt.hist(x_strains, bins=100, alpha=0.7, label="edge strains")
    plt.axvline(total_strain_ratio, color='r', linestyle='--',
                label=f"Total strain ratio: {total_strain_ratio:.2f}")
    plt.xlabel("Strain")
    plt.ylabel("Frequency")
    plt.legend()
    plt.title("Edge Strain Distribution")

    # Save the figure and close it so repeated calls do not leak figures.
    save_path = os.path.join(network_mission_dir, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(
        save_path, f'bond_full_x_strain_Dist_full_{note}.png'))
    plt.close()

    return x_strains_dict


def plot_d_yz_histogram(network, network_mission_dir):
    """
    For every bond, compute d_yz in normalized coordinates (box size is not
    applied), plot a histogram, and return the per-bond values.

    d_yz = |initial yz bond vector| + |final yz bond vector|, each evaluated
    with the minimum-image convention.
    NOTE(review): this sums the two bond-vector norms rather than measuring a
    displacement between states — confirm this is the intended quantity.

    (BUGFIX: the yz components are positions[1:3]; the old slice [:2] took
    the x and y components instead, contradicting the function's purpose and
    the x-projection sister function which uses index 0 for x.)

    Parameters:
    - network: graph whose nodes carry 'initial_position' / 'final_position'
      as normalized coordinates
    - network_mission_dir: base directory; the SVG goes to 'analysis/'

    Returns:
    - dict mapping (u, v) edge tuples to their d_yz value
    """

    def minimum_image_distance(pos1, pos2):
        """Minimum-image separation for normalized (fractional) coordinates."""
        delta = pos2 - pos1
        delta -= np.round(delta)  # wrap each component into [-0.5, 0.5]
        return delta

    bond_displacement_dict = {}
    bond_displacement_list = []

    # Compute d_yz for every edge in the network.
    for u, v in network.edges():
        # y and z components (indices 1 and 2) of the endpoint positions.
        initial_pos_u = np.array(network.nodes[u]['initial_position'])[1:3]
        initial_pos_v = np.array(network.nodes[v]['initial_position'])[1:3]
        final_pos_u = np.array(network.nodes[u]['final_position'])[1:3]
        final_pos_v = np.array(network.nodes[v]['final_position'])[1:3]

        initial_dis = minimum_image_distance(initial_pos_u, initial_pos_v)
        final_dis = minimum_image_distance(final_pos_u, final_pos_v)

        # Sum of the initial and final yz bond-vector magnitudes.
        d_yz = np.linalg.norm(initial_dis) + np.linalg.norm(final_dis)

        bond_displacement_dict[(u, v)] = d_yz
        bond_displacement_list.append(d_yz)

    # Draw the distribution histogram.
    plt.figure(figsize=(8, 6))
    plt.hist(bond_displacement_list, bins=60,
             alpha=0.7, label="yz displacement")
    plt.xlim(right=1)
    plt.xlabel("Displacement")
    plt.ylabel("Frequency")
    plt.legend()
    plt.title("yz Displacement Distribution")

    # Save the figure and close it so repeated calls do not leak figures.
    save_path = os.path.join(network_mission_dir, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(save_path, 'yz_Displacement_Distribution.svg'))
    plt.close()

    return bond_displacement_dict


def normalize_coordinates_final_state(network_mission_dir):
    """
    Locate the last '*dump' file in network_mission_dir and return its final
    frame's box size plus every atom's normalized coordinates.

    Parameters:
    - network_mission_dir: directory containing one or more LAMMPS dump files
      whose names end in 'dump'.

    Returns:
    - box_final: the three box edge lengths (lx, ly, lz).
    - final_positions_dict: {atom_id: normalized position}.

    Raises:
    - IndexError if the directory contains no file ending in 'dump'.

    NOTE(review): sorted() is lexicographic; if dump names embed unpadded
    step numbers (e.g. '100.dump' < '99.dump'), the "last" file may not be
    the latest frame — confirm the naming scheme used by the workflow.
    """
    dump_names = [d for d in os.listdir(network_mission_dir)
                  if d.endswith('dump')]
    last_dump_path = os.path.join(network_mission_dir, sorted(dump_names)[-1])

    # Delegate to the shared dump reader so both entry points stay in sync
    # (the previous body duplicated normalize_coordinates_dump line-for-line).
    return normalize_coordinates_dump(last_dump_path)


def normalize_coordinates_dump(dump_file):
    """
    Read a LAMMPS dump file and return, for its last frame, the box edge
    lengths together with every atom's normalized (fractional) coordinates.

    Parameters:
    - dump_file: path to a LAMMPS dump file.

    Returns:
    - box: the three box edge lengths (lx, ly, lz).
    - positions_by_id: {atom_id: position divided by the box lengths}.
    """
    universe = mda.Universe(dump_file, topology_format='LAMMPSDUMP')
    universe.trajectory[-1]  # jump to the last frame; universe now points at it

    box = universe.dimensions[:3]  # box edge lengths only (drop the angles)
    # Divide Cartesian positions by the box lengths -> fractional coordinates.
    scaled = universe.atoms.positions / box
    positions_by_id = dict(zip(universe.atoms.ids, scaled))
    return box, positions_by_id


def d_yz_hist_plot(network_mission_dir):
    """
    Read 'analysis/d_yz.txt' (one "bond : value" entry per line), plot a
    histogram of the values, and save it as an SVG under 'analysis'.

    Parameters:
    - network_mission_dir: mission directory containing 'analysis/d_yz.txt'.

    Raises:
    - FileNotFoundError if the d_yz.txt file is missing.
    - ValueError if a line's trailing field is not a number.
    """
    # Build the path portably instead of embedding a '/' in the component.
    d_yz_path = os.path.join(network_mission_dir, 'analysis', 'd_yz.txt')
    displacement_values = []
    with open(d_yz_path, 'r') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines
            # rpartition on the LAST ':' is robust even if the bond label
            # itself ever contains a colon (plain split(':') would raise).
            _bond, _, d_yz = line.rpartition(':')
            displacement_values.append(float(d_yz.strip()))

    # Histogram of the displacement values.
    plt.figure(figsize=(8, 6))
    plt.hist(displacement_values, bins=200, alpha=0.7, label="yz displacement")
    plt.xlim(right=3)
    plt.xlabel("Displacement")
    plt.ylabel("Frequency")
    plt.legend()
    plt.title("yz Displacement Distribution")

    # Save the figure as an SVG file.
    save_path = os.path.join(network_mission_dir, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(save_path, 'yz_Displacement_Distribution.svg'))


def bond_break_timesteps(bond_path):
    """
    Collect every TIMESTEP value found in a LAMMPS bond dump file.

    Parameters:
    - bond_path: path to the bond dump file.

    Returns:
    - list of integer timesteps, in file order; lines after an
      'ITEM: TIMESTEP' marker that do not parse as integers are skipped.
    """
    collected = []

    with open(bond_path, 'r') as handle:
        for raw in handle:
            if not raw.strip().startswith('ITEM: TIMESTEP'):
                continue
            # The timestep value sits on the very next line.
            value_line = next(handle, None)
            if value_line is None:
                continue
            try:
                collected.append(int(value_line.strip()))
            except ValueError:
                # Not an integer — ignore this marker.
                pass

    return collected

# Reading of the LAMMPS bond dump file


def wipe_off_matching_elements(prev_bonds, current_bonds):
    """
    Remove from prev_bonds (in place) every element that also appears in
    current_bonds, then return the mutated prev_bonds.

    Works for both lists and sets: one occurrence is removed per matching
    element of current_bonds; missing elements are ignored.
    """
    for candidate in current_bonds:
        try:
            prev_bonds.remove(candidate)
        except (KeyError, ValueError):
            # Not present (KeyError for sets, ValueError for lists) — skip.
            pass
    return prev_bonds


def break_frame_old(bond_path):
    """
    Parse a LAMMPS bond dump and, for every frame in which the number of
    entries changed, record the box bounds, the x strain relative to the
    first frame, and the bonds that disappeared since the previous stored
    frame.

    Parameters:
    - bond_path: path to the bond dump file.

    Returns:
    - dict keyed by timestep. The first frame stores NUMBER_OF_ENTRIES and
      BOX_BOUNDS; each later frame whose entry count changed additionally
      stores STRAIN_X and BROKEN_BONDS (a list of (id1, id2) tuples).

    NOTE(review): bonds are kept in a set, so duplicate (parallel) bonds
    between the same atom pair collapse to one entry — see break_frame for
    the Counter-based variant that preserves multiplicity.
    """
    result = {}
    prev_number_of_entries = None
    prev_bonds = set()

    with open(bond_path, 'r') as file:
        while True:
            line = file.readline()
            if not line:
                break  # End of file

            line = line.strip()
            if line.startswith('ITEM: TIMESTEP'):
                # Read the current timestep
                timestep_line = file.readline()
                if not timestep_line:
                    break
                current_timestep = int(timestep_line.strip())

                # Read until 'ITEM: NUMBER OF ENTRIES'
                while True:
                    line = file.readline()
                    if not line:
                        break
                    line = line.strip()
                    if line.startswith('ITEM: NUMBER OF ENTRIES'):
                        break

                # Read the current number of entries
                number_of_entries_line = file.readline()
                if not number_of_entries_line:
                    break
                current_number_of_entries = int(number_of_entries_line.strip())

                # Compare with the previous number of entries
                if prev_number_of_entries is None:
                    # First frame, store initial data
                    prev_number_of_entries = current_number_of_entries

                    # Read 'ITEM: BOX BOUNDS' and the bounds
                    while True:
                        line = file.readline()
                        if not line:
                            break
                        line = line.strip()
                        if line.startswith('ITEM: BOX BOUNDS'):
                            break

                    box_bounds = []
                    for _ in range(3):
                        bounds_line = file.readline()
                        if not bounds_line:
                            break
                        box_bounds.append(bounds_line.strip())

                    # Calculate initial lx (reference length for the strain)
                    x_bounds = box_bounds[0].split()
                    lx0 = float(x_bounds[1]) - float(x_bounds[0])

                    # Skip 'ITEM: ENTRIES' line
                    while True:
                        line = file.readline()
                        if not line:
                            break
                        line = line.strip()
                        if line.startswith('ITEM: ENTRIES'):
                            break

                    # Read the bond entries (columns 2-3 are the atom IDs)
                    prev_bonds = set()
                    for _ in range(current_number_of_entries):
                        entry_line = file.readline()
                        if not entry_line:
                            break
                        parts = entry_line.strip().split()
                        bond = tuple(map(int, parts[1:3]))
                        prev_bonds.add(bond)

                    # Store initial frame data
                    result[current_timestep] = {
                        'NUMBER_OF_ENTRIES': current_number_of_entries,
                        'BOX_BOUNDS': box_bounds
                    }
                else:
                    if current_number_of_entries == prev_number_of_entries:
                        # Skip the rest of the frame (we know how many lines to skip)
                        # Skip 'ITEM: BOX BOUNDS' and bounds
                        while True:
                            line = file.readline()
                            if not line:
                                break
                            line = line.strip()
                            if line.startswith('ITEM: BOX BOUNDS'):
                                break
                        for _ in range(3):
                            file.readline()

                        # Skip 'ITEM: ENTRIES' and the entries
                        while True:
                            line = file.readline()
                            if not line:
                                break
                            line = line.strip()
                            if line.startswith('ITEM: ENTRIES'):
                                break
                        for _ in range(current_number_of_entries):
                            file.readline()
                    else:
                        # Number of entries has changed, process this frame
                        # Read 'ITEM: BOX BOUNDS' and the bounds
                        while True:
                            line = file.readline()
                            if not line:
                                break
                            line = line.strip()
                            if line.startswith('ITEM: BOX BOUNDS'):
                                break

                        box_bounds = []
                        for _ in range(3):
                            bounds_line = file.readline()
                            if not bounds_line:
                                break
                            box_bounds.append(bounds_line.strip())

                        # Calculate current lx and strain
                        x_bounds = box_bounds[0].split()
                        lx = float(x_bounds[1]) - float(x_bounds[0])
                        strain = (lx - lx0) / lx0

                        # Skip 'ITEM: ENTRIES' line
                        while True:
                            line = file.readline()
                            if not line:
                                break
                            line = line.strip()
                            if line.startswith('ITEM: ENTRIES'):
                                break

                        # Read the bond entries
                        current_bonds = set()
                        for _ in range(current_number_of_entries):
                            entry_line = file.readline()
                            if not entry_line:
                                break
                            parts = entry_line.strip().split()
                            bond = tuple(map(int, parts[1:3]))
                            current_bonds.add(bond)

                        # Find broken bonds (present before, absent now)
                        broken_bonds = prev_bonds - current_bonds

                        # Store the result
                        result[current_timestep] = {
                            'NUMBER_OF_ENTRIES': current_number_of_entries,
                            'BOX_BOUNDS': box_bounds,
                            'STRAIN_X': strain,
                            'BROKEN_BONDS': list(broken_bonds)
                        }

                        # Update previous bonds and number of entries
                        prev_bonds = current_bonds
                        prev_number_of_entries = current_number_of_entries
            else:
                continue  # Skip any irrelevant lines

    return result


def break_frame(bond_path):
    """
    Parse a LAMMPS bond dump and, for every frame where the entry count
    changed, record the box bounds, the x strain relative to the first
    frame, and the bonds broken since the previous stored frame.

    Unlike break_frame_old, bonds are tallied with a Counter so parallel
    (duplicate) bonds between the same atom pair are counted correctly.

    Parameters:
    - bond_path: path to the bond dump file.

    Returns:
    - dict keyed by timestep. The first frame holds NUMBER_OF_ENTRIES and
      BOX_BOUNDS; later frames additionally STRAIN_X and BROKEN_BONDS
      (each broken bond repeated according to how many copies broke).
    """
    result = {}
    prev_number_of_entries = None
    prev_bonds = Counter()  # a Counter (not a set) so duplicate bonds keep their multiplicity

    with open(bond_path, 'r') as file:
        while True:
            line = file.readline()
            if not line:
                break  # end of file

            line = line.strip()
            if line.startswith('ITEM: TIMESTEP'):
                # Read the current timestep
                timestep_line = file.readline()
                if not timestep_line:
                    break
                current_timestep = int(timestep_line.strip())

                # Read until 'ITEM: NUMBER OF ENTRIES'
                while True:
                    line = file.readline()
                    if not line:
                        break
                    line = line.strip()
                    if line.startswith('ITEM: NUMBER OF ENTRIES'):
                        break

                # Read the current number of entries
                number_of_entries_line = file.readline()
                if not number_of_entries_line:
                    break
                current_number_of_entries = int(number_of_entries_line.strip())

                if prev_number_of_entries is None:
                    # First frame: store the initial data
                    prev_number_of_entries = current_number_of_entries

                    # Read until 'ITEM: BOX BOUNDS', then the bounds
                    while True:
                        line = file.readline()
                        if not line:
                            break
                        line = line.strip()
                        if line.startswith('ITEM: BOX BOUNDS'):
                            break

                    box_bounds = []
                    for _ in range(3):
                        bounds_line = file.readline()
                        if not bounds_line:
                            break
                        box_bounds.append(bounds_line.strip())

                    # Compute the initial box length lx0 (strain reference)
                    x_bounds = box_bounds[0].split()
                    lx0 = float(x_bounds[1]) - float(x_bounds[0])

                    # Skip ahead to the 'ITEM: ENTRIES' line
                    while True:
                        line = file.readline()
                        if not line:
                            break
                        line = line.strip()
                        if line.startswith('ITEM: ENTRIES'):
                            break

                    # Read the bond entries, counting occurrences of each bond
                    prev_bonds = Counter()
                    for _ in range(current_number_of_entries):
                        entry_line = file.readline()
                        if not entry_line:
                            break
                        parts = entry_line.strip().split()
                        bond = tuple(map(int, parts[1:3]))
                        prev_bonds[bond] += 1

                    # Store the first-frame data
                    result[current_timestep] = {
                        'NUMBER_OF_ENTRIES': current_number_of_entries,
                        'BOX_BOUNDS': box_bounds
                    }
                else:
                    if current_number_of_entries == prev_number_of_entries:
                        # Entry count unchanged: skip this frame's body
                        while True:
                            line = file.readline()
                            if not line:
                                break
                            line = line.strip()
                            if line.startswith('ITEM: BOX BOUNDS'):
                                break
                        for _ in range(3):
                            file.readline()
                        while True:
                            line = file.readline()
                            if not line:
                                break
                            line = line.strip()
                            if line.startswith('ITEM: ENTRIES'):
                                break
                        for _ in range(current_number_of_entries):
                            file.readline()
                    else:
                        # Entry count changed: process this frame
                        while True:
                            line = file.readline()
                            if not line:
                                break
                            line = line.strip()
                            if line.startswith('ITEM: BOX BOUNDS'):
                                break

                        box_bounds = []
                        for _ in range(3):
                            bounds_line = file.readline()
                            if not bounds_line:
                                break
                            box_bounds.append(bounds_line.strip())

                        # Compute the current lx and the engineering strain in x
                        x_bounds = box_bounds[0].split()
                        lx = float(x_bounds[1]) - float(x_bounds[0])
                        strain = (lx - lx0) / lx0

                        # Skip ahead to the 'ITEM: ENTRIES' line
                        while True:
                            line = file.readline()
                            if not line:
                                break
                            line = line.strip()
                            if line.startswith('ITEM: ENTRIES'):
                                break

                        # Read this frame's bond entries, counting with a Counter
                        current_bonds = Counter()
                        for _ in range(current_number_of_entries):
                            entry_line = file.readline()
                            if not entry_line:
                                break
                            parts = entry_line.strip().split()
                            bond = tuple(map(int, parts[1:3]))
                            current_bonds[bond] += 1

                        # Broken bonds: Counter subtraction subtracts counts per
                        # key and automatically drops non-positive results
                        broken_bonds = prev_bonds - current_bonds

                        # Store the result; BROKEN_BONDS repeats each bond by
                        # how many of its copies broke
                        result[current_timestep] = {
                            'NUMBER_OF_ENTRIES': current_number_of_entries,
                            'BOX_BOUNDS': box_bounds,
                            'STRAIN_X': strain,
                            # each broken bond, repeated by multiplicity
                            'BROKEN_BONDS': list(broken_bonds.elements())
                        }

                        # Update prev_bonds and prev_number_of_entries
                        prev_bonds = current_bonds
                        prev_number_of_entries = current_number_of_entries
            else:
                continue  # skip irrelevant lines

    return result


def __data_file_to_networkx(filepath):
    # NOTE: marked broken/obsolete by the author ("reads all the bonds?") 2025-03-16
    """
    Convert a LAMMPS data file into a NetworkX MultiGraph, one node per atom
    and one edge per bond.

    - filepath: path to the LAMMPS data file.
    - Returns: a NetworkX MultiGraph.

    NOTE(review): reduce_graph(G) below mutates its argument (it calls
    G.remove_nodes_from while reducing), so the G returned here is NOT the
    full atom graph built above — it has been stripped by the reduction
    pass even though the reduced graph itself is discarded. Confirm whether
    callers expect the full or the stripped graph.
    """
    # Initialize the MultiGraph.
    G = nx.MultiGraph()

    # Read the LAMMPS data file with MDAnalysis.
    u = mda.Universe(filepath)

    # Add every atom as a node, keeping its type attribute.
    for atom in u.atoms:
        G.add_node(atom.id, type=atom.type)

    # Add one edge per bond.
    for bond in u.bonds:
        atom1 = bond.atoms[0]
        atom2 = bond.atoms[1]

        # Atom IDs of the two endpoints.
        atom1_id = atom1.id
        atom2_id = atom2.id

        # Add the edge to the graph.
        G.add_edge(
            atom1_id,
            atom2_id
        )

    reduce_G = reduce_graph(G)
    a = gm.all_atomlist(reduce_G)
    # print(len(a))
    return G


def _find_type1_to_type1_path(G, start):
    """
    在图 G 中,从 start (type=1) 节点出发,仅在类型为 type=2 的节点上行走,
    直到到达 type=1 节点.(有可能是同一个节点)搜集所有满足条件的路径并返回.

    返回:
        results: list[tuple]
            其中每个元素是一个元组,表示一条从 start 到另一 type=1 节点的完整路径.
            若找不到满足条件的路径,则返回空列表.
    """
    # 1. 起点检查
    if start not in G or G.nodes[start].get('type') != '1':
        return []

    results = []

    def dfs(path):
        """
        path: 当前 DFS 路径(列表存储), 其最后一个元素是本次 DFS 所在节点
        """
        current = path[-1]

        # 2. 遍历 current 的所有邻居
        for neighbor in G.neighbors(current):
            # 如果该邻居在当前 path 上出现过,说明形成环路,跳过以防死循环
            if neighbor in path:
                continue

            neighbor_type = G.nodes[neighbor].get('type')

            if neighbor_type == '1':
                # 一旦遇到 type=1 节点,收集路径并停止向这个方向继续
                results.append(tuple(path + [neighbor]))
            elif neighbor_type == '2':
                # 若类型为2,继续 DFS
                dfs(path + [neighbor])
            else:
                # 遇到其他类型的节点,不作处理(不继续深入)
                continue

    # 3. 从 start 节点出发,开始 DFS
    dfs([start])

    return results


def reduce_graph(G):
    """
    Reduce G to a MultiGraph containing only the type='1' (crosslink) nodes.

    For every pair of type='1' nodes connected through a chain of type='2'
    nodes, one edge is added to the reduced graph; the edge's 'atomlist'
    attribute stores the full path (both endpoints plus the intermediate
    type='2' atoms).

    WARNING: this function consumes its argument — processed chain atoms and
    crosslink nodes are removed from G while reducing.

    Parameters:
    - G: nx.MultiGraph whose nodes carry a 'type' attribute ('1' or '2').

    Returns:
    - nx.MultiGraph with only the type='1' nodes and one edge per chain.
    """
    # All crosslink (type='1') nodes of the original graph.
    type1_nodes = [n for n, d in G.nodes(data=True) if d.get('type') == '1']

    # New MultiGraph holding only the type='1' nodes (attributes copied).
    G_reduced = nx.MultiGraph()
    for n in type1_nodes:
        G_reduced.add_node(n, **G.nodes[n])

    while True:
        # type='1' nodes still present in the (shrinking) original graph;
        # processed crosslinks were removed from G, so membership suffices.
        remaining_type1 = [n for n in type1_nodes if n in G]
        if not remaining_type1:
            break

        # Take one unprocessed crosslink as the chain start.
        node_a = remaining_type1[0]

        # Enumerate all chains from node_a to other type='1' nodes.
        paths = _find_type1_to_type1_path(G, node_a)
        for path in paths:
            # path = [node_a, ...intermediate type='2' atoms..., node_b]
            node_b = path[-1]
            path_type2 = path[1:-1]  # the intermediate type='2' atoms

            # Record the chain as a single edge in the reduced graph.
            G_reduced.add_edge(node_a, node_b, atomlist=path)

            # Drop the consumed chain atoms from the original graph.
            # node_b is kept: other chains may still start from it.
            G.remove_nodes_from(path_type2)

        # All chains from node_a handled; remove it from the original graph.
        G.remove_nodes_from([node_a])

    return G_reduced


def __reduce_graph2(G):
    # error only has 21168 atoms
    """
    Reduce the original graph G and return a new graph G_reduced:
      1. keep only type='1' atoms as nodes;
      2. whenever two type='1' nodes are connected by a path made solely of
         type='2' nodes, add an edge to G_reduced and store those type='2'
         nodes in the edge's 'atomlist' attribute.

    Note: this implementation assumes that a chain of type='2' atoms between
          two type='1' nodes never branches (linear chain).
    """
    # Create a new MultiGraph (a plain Graph would do if parallel edges are not needed).
    G_reduced = nx.MultiGraph()

    # Collect all type='1' nodes.
    type1_nodes = [n for n, d in G.nodes(data=True) if d.get('type') == '1']

    # Copy every type='1' node and its attributes into G_reduced.
    for n in type1_nodes:
        G_reduced.add_node(n, **G.nodes[n])

    # To avoid mutating the graph while traversing it, record the nodes to
    # delete and remove them all at the end.
    nodes_to_remove = set()

    # Visit every type='1' node.
    for node_a in type1_nodes:
        # Skip node_a if it was already removed from G (e.g. while handling an earlier chain).
        if node_a not in G:
            continue

        # Inspect node_a's neighbours.
        neighbors = list(G[node_a])  # G[node_a] is the adjacency view
        for neighbor in neighbors:
            # Skip neighbours that are no longer in the graph.
            if neighbor not in G:
                continue

            # 1. Neighbour is another type='1' node: connect directly (no type='2' in between).
            if G.nodes[neighbor].get('type') == '1':
                # Add the edge in G_reduced with an empty atomlist.
                G_reduced.add_edge(node_a, neighbor, atomlist=[])
                # Nothing needs to be deleted.
                continue

            # 2. Neighbour is type='2': follow the unbranched chain until the next type='1'.
            if G.nodes[neighbor].get('type') == '2':
                atomlist = []
                current = neighbor
                atomlist.append(current)

                # Tentatively collect the chain for deletion once its end is
                # found; if the chain turns out to be broken, delete nothing.
                path_failed = False

                prev = node_a  # the previous node on the walk

                # Keep walking until the next type='1' node or a dead end.
                while True:
                    # All neighbours of current except the node we came from.
                    next_nodes = [nbr for nbr in G[current] if nbr != prev]

                    if not next_nodes:
                        # No continuation -> dead end.
                        path_failed = True
                        break

                    # The chain is assumed unbranched, so next_nodes should hold one node.
                    next_node = next_nodes[0]

                    if G.nodes[next_node].get('type') == '1':
                        # Successfully reached the next type='1' node.
                        node_b = next_node
                        # Add the (node_a, node_b) edge to G_reduced.
                        G_reduced.add_edge(node_a, node_b, atomlist=atomlist)

                        # Delete every type='2' node on the chain (node_b is kept).
                        nodes_to_remove.update(atomlist)
                        break
                    elif G.nodes[next_node].get('type') == '2':
                        # Keep walking forward.
                        atomlist.append(next_node)
                        prev = current
                        current = next_node
                    else:
                        # Node is neither type='1' nor type='2' (or some other anomaly).
                        path_failed = True
                        break

                # If the chain failed, do not delete the recorded atomlist nodes.
                if path_failed:
                    # Nothing to do; just move on.
                    pass

    # Remove every type='2' node collected in this pass.
    G.remove_nodes_from(nodes_to_remove)

    return G_reduced


def ___reduce_graph_1(G):
    # Too slow in practice.
    """
    Reduce the original graph G and return a new graph G_reduced:
      1. keep only type='1' atoms as nodes;
      2. whenever two type='1' nodes are connected by a path made solely of
         type='2' nodes, add an edge to G_reduced and store those type='2'
         nodes in the edge's 'atomlist' attribute.
    """
    # First collect all type='1' nodes.
    type1_nodes = [n for n, d in G.nodes(data=True) if d.get('type') == '1']
    # The type='2' nodes could be recorded here as well if needed later:
    # type2_nodes = [n for n, d in G.nodes(data=True) if d.get('type') == 2]

    # Initialize a new undirected graph (or MultiGraph, as required).
    G_reduced = nx.MultiGraph()
    # Add every type='1' node to the new graph (carrying over its attributes).
    for n in type1_nodes:
        G_reduced.add_node(n, **G.nodes[n])

    # Double loop over every pair of type='1' nodes, checking for a path that
    # passes only through type='2' nodes. For simplicity only the shortest
    # path is considered; use nx.all_simple_paths for every possible path.
    for i in range(len(type1_nodes)):
        source = type1_nodes[i]
        for j in range(i+1, len(type1_nodes)):
            target = type1_nodes[j]

            # Try to find the shortest path.
            try:
                path = nx.shortest_path(G, source=source, target=target)
            except nx.NetworkXNoPath:
                # No path at all: skip this pair.
                continue

            # Check that every node on the path except the endpoints is type='2'.
            # With path = [source, ..., target], the pair qualifies when all
            # intermediate nodes are type='2'.
            middle_nodes = path[1:-1]
            if all(G.nodes[m]['type'] == '2' for m in middle_nodes):
                # The path passes only through type='2' nodes, so add an edge
                # to G_reduced and store the intermediate node IDs in 'atomlist'.
                G_reduced.add_edge(
                    source,
                    target,
                    atomlist=middle_nodes
                )

    return G_reduced


def data_file_to_networkx_1(filepath):
    # 2025-03-16
    # Known issues: 1) cannot represent multiple parallel edges between the
    # same pair; 2) every isolated crosslink atom is still added as a node.
    """
    Convert a LAMMPS data file into a NetworkX MultiGraph, adding an
    atomlist attribute to each edge's data.

    - filepath: path to the LAMMPS data file.
    - Returns: a NetworkX MultiGraph carrying atomlist information.
    """
    # Initialize the MultiGraph.
    G = nx.MultiGraph()

    # Read the LAMMPS data file with MDAnalysis.
    u = mda.Universe(filepath)

    # Collect the atoms of type '1' and add them to the graph as nodes.
    type_1_atoms = {atom.id for atom in u.atoms if atom.type == '1'}
    G.add_nodes_from(type_1_atoms)

    # Build an adjacency dict from the bond list.
    adjacency = defaultdict(set)
    for bond in u.bonds:
        atom1, atom2 = bond.atoms
        id1, id2 = atom1.id, atom2.id
        adjacency[id1].add(id2)
        adjacency[id2].add(id1)

    # Starting from each type-'1' atom, walk step by step to find other
    # type-'1' atoms and connect them, recording the chain of atoms passed.
    processed = set()
    for start in type_1_atoms:
        # Visit every direct neighbour of the start atom.
        for neighbor in adjacency[start]:
            if neighbor in type_1_atoms:
                # The neighbour is also type 1 and this edge is not recorded yet.
                if (start, neighbor) not in processed and (neighbor, start) not in processed:
                    # For directly bonded type-1 atoms, atomlist is just [start, neighbor].
                    G.add_edge(start, neighbor, atomlist=[start, neighbor])
                    processed.add((start, neighbor))
            else:
                # Otherwise keep walking until a type-1 atom is reached or the chain ends.
                path = [start, neighbor]
                current = neighbor
                previous = start

                while current not in type_1_atoms:
                    next_atoms = adjacency[current] - {previous}  # avoid stepping back
                    if not next_atoms:
                        # Dead end: no way to reach another type-1 atom.
                        break
                    # Take an arbitrary continuation; branching chains would need extra handling.
                    next_atom = next_atoms.pop()
                    path.append(next_atom)
                    previous, current = current, next_atom

                # Reached the other type-1 endpoint and the edge is new.
                if current in type_1_atoms:
                    if (start, current) not in processed and (current, start) not in processed:
                        # path holds every atom from start to current.
                        G.add_edge(start, current, atomlist=path)
                        processed.add((start, current))

    # Shift node IDs down by one if required (map files count from 0).
    G = subtract_one_from_node_ids(G)

    return G


def map_file_to_networkx(filepath):
    # 2025-03-17
    # Atom IDs in the map file are 1 smaller than in the data file.
    """
    Parse a .map file into a NetworkX MultiGraph.

    Each non-empty line before the 'bondlist' marker describes one chain:
        chain_id; chain_len; crosslink; atomlist; bondlist
    Chains whose crosslink field holds exactly two endpoints become edges
    carrying chain_id, chain_len, atomlist (endpoints included) and bondlist.

    Parameters:
    - filepath: path to the .map file.

    Returns:
    - nx.MultiGraph with one edge per two-endpoint chain.

    Raises:
    - ValueError if the file has no 'bondlist' marker line.
    """
    G = nx.MultiGraph()

    with open(filepath, 'r') as f:
        lines = f.readlines()

    # Everything before the 'bondlist' marker describes chains.
    bond_start = lines.index('bondlist\n')
    chain_data = lines[:bond_start]
    # bond_data = lines[bond_start + 1:]  # bond entries after the marker (unused)

    # Add each chain's crosslink endpoints as nodes and connect them.
    for line in chain_data:
        if line.strip():  # skip blank lines
            chain_id, chain_len, crosslink, atomlist, bondlist = line.split(
                ';')

            # Parse the literal list fields with ast.literal_eval: it accepts
            # only Python literals, unlike eval which would execute arbitrary
            # code from the file.
            crosslink = ast.literal_eval(crosslink.strip())  # endpoint pair, e.g. [12, 57]
            atomlist = ast.literal_eval(atomlist.strip())    # chain atom IDs
            bondlist = ast.literal_eval(bondlist.strip())    # chain bond IDs

            # Only chains with exactly two crosslink endpoints become edges.
            if len(crosslink) == 2:
                node1, node2 = crosslink

                # Edge carries chain metadata; atomlist includes the endpoints.
                G.add_edge(node1, node2, chain_id=chain_id, chain_len=int(chain_len),
                           atomlist=[node1]+atomlist+[node2], bondlist=bondlist)

    return G

# 2025-03-17: build a single coordinate-annotated graph from the map and data files


def map_data2graph(map_path, data_path):
    """
    2025-03-17
    Build a coordinate-annotated graph from a .map file and a LAMMPS data file.

    1. Parse map_path into a NetworkX graph.
    2. Load data_path with MDAnalysis; store the box in graph.graph['box'].
    3. Attach each node's coordinates as graph.nodes[node]['posi'] and each
       edge's MDAnalysis atoms as edge['u_atom_list'].
    """
    # 1) Graph topology from the .map file.
    graph = map_file_to_networkx(map_path)

    # 2) Load the LAMMPS data file (regular LAMMPS data format).
    universe = mda.Universe(data_path)

    # 3) Record the box on the graph itself.
    # universe.dimensions = [lx, ly, lz, alpha, beta, gamma]
    graph.graph['box'] = universe.dimensions.copy()

    # 4) Per-node coordinates.
    # Graph node IDs are one less than the data-file atom IDs (the map counts
    # from 0, the data file from 1); MDAnalysis indexing u.atoms[a-1] returns
    # the atom whose ID is a, since atoms are ordered by ID.
    for node_id in graph.nodes():
        graph.nodes[node_id]['posi'] = universe.atoms[node_id].position.copy()

    # 5) Per-edge atom objects: for every edge, map each ID in its 'atomlist'
    # to the corresponding MDAnalysis atom and store the dict as 'u_atom_list'.
    for _n1, _n2, _key, edge_data in graph.edges(keys=True, data=True):
        ids_on_edge = edge_data.get('atomlist', [])
        edge_data['u_atom_list'] = {i: universe.atoms[i] for i in ids_on_edge}
    return graph
