# 2024-11-21  Collection of graph_series operations.
import multiprocessing
from concurrent.futures import ProcessPoolExecutor, as_completed
import time
import pickle
import numpy as np
import matplotlib.pyplot as plt
import MDAnalysis as mda
import GraphTools.network_method as nm
import GraphTools.basic_method as bm
import os

from GraphTools.analysis import *

import warnings
# Suppress specific known-noisy warnings from MDAnalysis.
warnings.filterwarnings('ignore', message='Guessed all Masses to 1.0')
warnings.filterwarnings('ignore', message='Reader has no dt information')


def bond_file2select_frames(bond_path, N_break_frame=None, xyz_timestep_list=None):
    """
    Parse a bond file and select frames whose cumulative broken-bond counts
    are spaced as evenly as possible.

    :param bond_path: path of the out-bonds.dat file (parsed by nm.break_frame).
    :param N_break_frame: maximum number of frames to keep; None (or fewer
        available frames) keeps everything.
    :param xyz_timestep_list: timesteps of the xyz trajectory; each one is
        matched to the nearest bond-file timestep.  If None, the raw parse
        result is returned unchanged.
        Bug fix: this name used to be read as an undefined global (NameError);
        it is now a parameter again, matching the superseded implementation.
    :return: (selected_frames_dict, bond_differences) where bond_differences
        holds the broken-bond count between consecutive selected frames.
    """
    # Parse the bond file into a {timestep: break-event info} dict.
    result = nm.break_frame(bond_path)

    if xyz_timestep_list is None:
        return result, []

    # For each xyz timestep keep the data of the nearest bond timestep.
    filtered_result = {}
    sorted_timesteps = sorted(result.keys())
    for xyz_time in xyz_timestep_list:
        closest_time = min(sorted_timesteps, key=lambda t: abs(t - xyz_time))
        filtered_result[xyz_time] = result[closest_time]

    # Without a frame budget (or with few enough frames) return everything.
    if N_break_frame is None or len(filtered_result) <= N_break_frame:
        return filtered_result, []

    # Cumulative broken-bond count per timestep.
    timesteps = sorted(filtered_result.keys())
    total_broken_bonds = 0
    broken_bonds_cumulative = []
    for i, timestep in enumerate(timesteps):
        if i == 0:
            broken_bonds_cumulative.append(0)
        else:
            broken_bonds = len(
                filtered_result[timestep].get('BROKEN_BONDS', []))
            total_broken_bonds += broken_bonds
            broken_bonds_cumulative.append(total_broken_bonds)

    # Target number of broken bonds per selected segment.
    target_broken_bonds_per_segment = total_broken_bonds / N_break_frame

    # Select frames so that consecutive picks straddle each cumulative target.
    selected_frames_dict = {
        timesteps[0]: filtered_result[timesteps[0]]}  # always keep the first frame
    current_target = target_broken_bonds_per_segment
    bond_differences = []

    for i in range(1, len(timesteps)):
        if broken_bonds_cumulative[i] >= current_target:
            selected_frames_dict[timesteps[i]] = filtered_result[timesteps[i]]
            # Broken-bond difference to the previously selected frame.
            if len(selected_frames_dict) > 1:
                prev_frame_timestep = list(selected_frames_dict.keys())[-2]
                bond_difference = broken_bonds_cumulative[i] - \
                    broken_bonds_cumulative[timesteps.index(
                        prev_frame_timestep)]
                bond_differences.append(bond_difference)
            current_target += target_broken_bonds_per_segment

        # Stop once enough frames have been selected.
        if len(selected_frames_dict) >= N_break_frame:
            break

    # Make sure the final frame is selected as well.
    if timesteps[-1] not in selected_frames_dict:
        prev_frame_timestep = list(selected_frames_dict.keys())[-1]
        bond_difference = broken_bonds_cumulative[-1] - \
            broken_bonds_cumulative[timesteps.index(prev_frame_timestep)]
        selected_frames_dict[timesteps[-1]] = filtered_result[timesteps[-1]]
        bond_differences.append(bond_difference)

    return selected_frames_dict, bond_differences


def ___old__bond_file2select_frames(bond_path, N_break_frame=None, xyz_timestep_list=None):
    """
    Superseded version of bond_file2select_frames (kept for reference only).

    Parses the bond file and, when requested, selects at most
    ``N_break_frame`` frames so that the broken-bond count between
    consecutive selected frames is as even as possible.
    """
    # Parse the bond file into a {timestep: break-event info} dict.

    result = nm.break_frame(bond_path)
    if xyz_timestep_list is None:
        return result, []
    # Keep only the bond-file frames closest to the entries of xyz_timestep_list.
    filtered_result = {}
    # Sort the keys in result for efficient search
    sorted_timesteps = sorted(result.keys())

    # Find the nearest timestep in result for each timestep in xyz_timestep_list
    for xyz_time in xyz_timestep_list:
        # Find the closest timestep in sorted_timesteps to xyz_time
        closest_time = min(sorted_timesteps, key=lambda t: abs(t - xyz_time))
        # Assign the data for closest_time to the xyz_time key in the filtered_result
        filtered_result[xyz_time] = result[closest_time]

    # Without a frame budget (or with few enough frames) return everything.
    if N_break_frame is None or len(filtered_result) <= N_break_frame:
        return filtered_result, []

    # Collect timesteps and per-step broken-bond counts.
    timesteps = sorted(filtered_result.keys())
    total_broken_bonds = 0
    broken_bonds_cumulative = []

    # Cumulative number of broken bonds over time.
    for i, timestep in enumerate(timesteps):
        if i == 0:
            broken_bonds_cumulative.append(0)
        else:
            broken_bonds = len(
                filtered_result[timestep].get('BROKEN_BONDS', []))
            total_broken_bonds += broken_bonds
            broken_bonds_cumulative.append(total_broken_bonds)

    # Target broken-bond count per selected segment.
    target_broken_bonds_per_segment = total_broken_bonds / N_break_frame

    # Select frames so that consecutive picks straddle each cumulative target.
    selected_frames_dict = {
        timesteps[0]: filtered_result[timesteps[0]]}  # always keep the first frame
    current_target = target_broken_bonds_per_segment
    bond_differences = []

    for i in range(1, len(timesteps)):
        if broken_bonds_cumulative[i] >= current_target:
            selected_frames_dict[timesteps[i]] = filtered_result[timesteps[i]]
            # Broken-bond difference to the previously selected frame.
            if len(selected_frames_dict) > 1:
                prev_frame_timestep = list(selected_frames_dict.keys())[-2]
                bond_difference = broken_bonds_cumulative[i] - \
                    broken_bonds_cumulative[timesteps.index(
                        prev_frame_timestep)]
                bond_differences.append(bond_difference)
            current_target += target_broken_bonds_per_segment

        # Stop once enough frames have been selected.
        if len(selected_frames_dict) >= N_break_frame:
            break

    # Make sure the final frame is selected as well.
    if timesteps[-1] not in selected_frames_dict:
        prev_frame_timestep = list(selected_frames_dict.keys())[-1]
        bond_difference = broken_bonds_cumulative[-1] - \
            broken_bonds_cumulative[timesteps.index(prev_frame_timestep)]
        selected_frames_dict[timesteps[-1]] = filtered_result[timesteps[-1]]
        bond_differences.append(bond_difference)

    return selected_frames_dict, bond_differences


def write_posi_and_box_to_graph(graph, universe):
    """
    Attach per-node coordinates and the simulation box size to a graph.

    :param graph: networkx graph whose node ids index atoms of the universe
    :param universe: MDAnalysis Universe positioned at the desired frame
    """
    # The box dimensions are stored once, as a graph-level attribute.
    graph.graph['box'] = universe.dimensions

    atoms = universe.atoms
    for atom_id in graph.nodes:
        # Node ids are used directly as 0-based MDAnalysis atom indices
        # (one less than the 1-based ids in the .xyz/.data files).
        try:
            graph.nodes[atom_id]['posi'] = atoms[atom_id].position
        except IndexError:
            print(
                f"Atom ID {atom_id} is out of range for the current universe frame.")


def equal_interval(alist, N_break_frame):
    """
    Select up to ``N_break_frame`` elements of ``alist`` at (approximately)
    equal index spacing, always including the first and last elements.

    :param alist: input sequence.
    :param N_break_frame: desired number of selected elements.
    :return: list of selected values; ``alist`` itself when it is shorter
        than ``N_break_frame``.
    """
    total_items = len(alist)

    # Never try to pick more elements than exist.
    if N_break_frame > total_items:
        return alist

    # Bug fix: N_break_frame <= 1 used to divide by zero (N == 1) or by a
    # negative number below; a single requested frame is the first element.
    if N_break_frame <= 1:
        return alist[:1]

    # Target index spacing between consecutive picks.
    target_interval = total_items / (N_break_frame - 1)

    # The first frame is always selected.
    selected_indices = [0]
    current_target = target_interval

    for i in range(1, total_items):
        if i >= current_target:
            selected_indices.append(i)
            current_target += target_interval

        # Stop once enough interior frames are chosen; the final frame is
        # appended below if still missing.
        if len(selected_indices) >= N_break_frame - 1:
            break

    # Make sure the last frame is selected.
    if selected_indices[-1] != total_items - 1:
        selected_indices.append(total_items - 1)

    # Map the chosen indices back to values.
    return [alist[i] for i in selected_indices]


def map_bond_xyz2graph_series(map_path, bond_path, xyz_path):
    """
    Build one graph per trajectory frame, up to the breaking frame.

    For every xyz frame with time <= broken_time the graph carries updated
    node positions and box size; whenever the bond file reports broken bonds
    at that frame, the corresponding edges are removed.

    :param map_path: network .map file (initial topology).
    :param bond_path: out-bonds.dat file with bond-breaking events.
    :param xyz_path: LAMMPS dump trajectory.
    :return: {timestep: {'graph': graph}} for every kept frame.
    """
    # 0. Initial topology from the map file.
    graph = nm.map_file_to_networkx(map_path)

    # 1. Trajectory universe from the xyz file.
    universe = mda.Universe(xyz_path, format='LAMMPSDUMP')

    # 1.1 Locate the breaking frame; the time of frame (broken_frame - 1)
    # defines the cutoff.
    # Bug fix: find_broken_frame returns a 3-tuple (index, n_frames, ratio);
    # the old 2-value unpack raised ValueError.
    broken_frame, _, _ = find_broken_frame(universe)
    universe.trajectory[broken_frame - 1]
    broken_time = int(universe.trajectory.time)

    # 2. Keep only bond-breaking events at or before the break time.
    result_full = nm.break_frame(bond_path)
    result = {k: v for k, v in result_full.items() if k <= broken_time}

    # 3. Trajectory timesteps, restricted to t <= broken_time.
    time_step_list = bm.extract_timesteps(xyz_path)
    universe_frames = [ts for ts in time_step_list if ts <= broken_time]

    # Map each timestep to its first frame index once (the original code
    # called list.index() inside the loop — O(n^2) overall).
    frame_index = {}
    for idx, ts in enumerate(time_step_list):
        frame_index.setdefault(ts, idx)  # first occurrence, like list.index()

    # All per-frame graphs, keyed by timestep.
    graph_series = {}

    # 4. Initial frame: write positions and box straight into the base graph.
    initial_frame = universe_frames[0]
    universe.trajectory[frame_index[initial_frame]]
    write_posi_and_box_to_graph(graph, universe)
    graph_series[initial_frame] = {'graph': graph}

    # 5. Every later frame starts from a copy of the previous frame's graph;
    # node positions are refreshed, and broken bonds (if any) are removed.
    prev_graph = graph
    for frame in universe_frames[1:]:
        # Copy so that each frame's graph stays independent.
        current_graph = prev_graph.copy()

        # Remove edges for bonds reported broken at this frame.
        if frame in result:
            broken_bonds = result[frame].get('BROKEN_BONDS', [])
            for bond in broken_bonds:
                # Bond atom ids come from LAMMPS (1-based); graph nodes are 0-based.
                node1_lammps_id, node2_lammps_id = bond
                node1, node2 = node1_lammps_id - 1, node2_lammps_id - 1
                # Remove the first edge whose 'atomlist' contains both atoms.
                for u, v, key, data in current_graph.edges(data=True, keys=True):
                    atom_list = data['atomlist']
                    if node1 in atom_list and node2 in atom_list:
                        current_graph.remove_edge(u, v, key=key)
                        break  # one matching edge per broken bond

        # Positions and box are refreshed for every frame, broken or not.
        universe.trajectory[frame_index[frame]]
        write_posi_and_box_to_graph(current_graph, universe)

        graph_series[frame] = {'graph': current_graph}
        prev_graph = current_graph

    return graph_series


def process_graph_series(graph_series, mission_dir, modeflag='GEBC', note='', bins=80):
    """
    Render the evolution of edge attribute ``modeflag`` ('GEBC' or 'GEBC_m1')
    as a GIF of per-frame histograms plus a time-vs-value heatmap.

    NOTE(review): unlike graph_series_To_GEBCs, this function expects the
    values of ``graph_series`` to be graph objects directly, not
    ``{'graph': ...}`` dicts — confirm with the caller.

    Parameters:
        graph_series: dict keyed by time; each value is a networkx graph.
        mission_dir: task directory where output folders are created.
        modeflag: edge-attribute name to histogram (missing values -> 0).
        note: suffix used in output file names.
        bins: number of histogram bins.

    Returns:
        gif_frames: list of per-frame PNG paths.
        gif_output_path: path of the generated GIF file.
        heatmap_path: path of the generated heatmap PNG.
    """
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    from PIL import Image

    # Create the output directories.
    analysis_path = os.path.join(mission_dir, 'analysis_heatmap_GIFs')
    os.makedirs(analysis_path, exist_ok=True)
    output_path = os.path.join(analysis_path, f'gif_frames_{note}')
    os.makedirs(output_path, exist_ok=True)
    gif_frames = []

    # Pass 1: collect every modeflag value (kept so the auto-scaled x axis
    # below can be re-enabled; unused while max_value is pinned manually).
    # Consistency fix: the missing-value default was 1 here but 0 everywhere
    # else in this file; since the collected values are currently unused,
    # aligning it to 0 does not change behavior.
    all_modeflag_values = []
    for time, graph in graph_series.items():
        modeflag_values = [data.get(modeflag, 0)
                           for u, v, data in graph.edges(data=True)]
        all_modeflag_values.extend(modeflag_values)

    # Global x-axis range.
    min_value = 0
    # max_value = max(all_modeflag_values, default=1)
    # NOTE: manually pinned so different runs are comparable.
    max_value = 0.005

    # Histogram bin edges shared by every frame.
    bin_edges = np.linspace(min_value, max_value, bins + 1)

    # Pass 2: per-frame histograms; track the global y maximum for the GIF
    # and keep the counts for the heatmap.
    y_max = 0
    modeflag_distributions = []
    timesteps = []
    sorted_times = sorted(graph_series.keys())  # chronological order
    for time in sorted_times:
        graph = graph_series[time]
        modeflag_values = [data.get(modeflag, 0)
                           for u, v, data in graph.edges(data=True)]
        counts, _ = np.histogram(modeflag_values, bins=bin_edges)
        modeflag_distributions.append(counts)
        timesteps.append(time)
        current_max = counts.max() if counts.size > 0 else 0
        y_max = max(y_max, current_max)

    # Pass 3: draw and save one histogram image per frame.
    for idx, time in enumerate(sorted_times):
        counts = modeflag_distributions[idx]
        plt.figure()
        plt.bar(bin_edges[:-1], counts, width=np.diff(bin_edges),
                align='edge', edgecolor='black')
        plt.title(f'Time {time}')
        plt.xlabel(modeflag)
        plt.ylabel('Count')
        plt.xlim(min_value, max_value)
        plt.ylim(0, y_max)
        plt.xticks(rotation=45)  # rotate x tick labels to avoid overlap
        plt.ticklabel_format(style='plain', axis='x')  # no scientific notation
        image_path = os.path.join(output_path, f'frame_{time}.png')
        plt.savefig(image_path, bbox_inches='tight')  # tight margins fit all labels
        plt.close()
        gif_frames.append(image_path)

    # Assemble the GIF from the saved frames.
    images = [Image.open(frame) for frame in gif_frames]
    gif_output_path = os.path.join(
        analysis_path, f'{modeflag}_distribution_{note}.gif')
    images[0].save(
        gif_output_path,
        save_all=True,
        append_images=images[1:],
        duration=1500,  # duration per frame (ms)
        loop=10
    )

    # Heatmap of histogram counts over time.
    modeflag_distributions = np.array(modeflag_distributions)

    plt.figure(figsize=(10, 8))
    plt.imshow(modeflag_distributions, aspect='auto',
               origin='lower', cmap='hot')
    plt.colorbar(label='Frequency')
    plt.xlabel(modeflag)
    plt.ylabel('Time Step')
    plt.title(f'{modeflag} Distribution Over Time')
    # TODO: map x ticks onto [min_value, max_value] using scientific notation.
    # Map y ticks to the actual timesteps.
    plt.yticks(range(len(timesteps)), timesteps)
    heatmap_path = os.path.join(
        analysis_path, f"{modeflag}_heatmap_{note}.png")
    plt.savefig(heatmap_path, bbox_inches='tight')  # tight margins fit all labels
    plt.close()

    return gif_frames, gif_output_path, heatmap_path


def graph_series_To_GEBCs(graph_series, mission_dir, modeflag='GEBC', note='', bins=80):
    # NOTE(review): redundant — near-duplicate of process_graph_series, but it
    # expects graph_series values shaped as {'graph': graph} and auto-scales
    # the x axis from the data instead of pinning it.
    """
    Generate the GIF evolution and heatmap for modeflag='GEBC' or 'GEBC_m1'.

    Parameters:
        graph_series: dict of per-frame data; key is the time, value holds the
            corresponding graph (networkx.Graph object) under 'graph'.
        mission_dir: task directory path.
        modeflag: edge-attribute name whose value is read from every edge.
        note: suffix used when naming output files.
        bins: number of histogram bins.

    Returns:
        gif_frames: list of image paths of the GIF frames.
        gif_output_path: path of the generated GIF file.
        heatmap_path: path of the generated heatmap file.
    """
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    from PIL import Image

    # Create the necessary directories.
    analysis_path = os.path.join(mission_dir, 'analysis')
    os.makedirs(analysis_path, exist_ok=True)
    output_path = os.path.join(analysis_path, f'gif_frames_{note}')
    os.makedirs(output_path, exist_ok=True)
    gif_frames = []

    # Pass 1: collect all modeflag values to fix the global x-axis range.
    all_modeflag_values = []
    for time, system in graph_series.items():
        graph = system['graph']
        # Collect each edge's modeflag value (missing -> 0).
        modeflag_values = [data.get(modeflag, 0)
                           for u, v, data in graph.edges(data=True)]
        all_modeflag_values.extend(modeflag_values)

    # Global minimum and maximum of the x axis.
    min_value = 0
    max_value = max(all_modeflag_values, default=1)
    # # NOTE manual override
    # max_value = 0.005

    # Histogram bin edges.
    bin_edges = np.linspace(min_value, max_value, bins + 1)

    # Pass 2: per-frame histograms; find the global y maximum and keep the
    # counts for the heatmap.
    y_max = 0
    modeflag_distributions = []
    timesteps = []
    sorted_times = sorted(graph_series.keys())  # ensure chronological order
    for time in sorted_times:
        graph = graph_series[time]['graph']
        modeflag_values = [data.get(modeflag, 0)
                           for u, v, data in graph.edges(data=True)]
        counts, _ = np.histogram(modeflag_values, bins=bin_edges)
        modeflag_distributions.append(counts)
        timesteps.append(time)
        current_max = counts.max() if counts.size > 0 else 0
        y_max = max(y_max, current_max)

    # Pass 3: draw and save one histogram image per frame.
    for idx, time in enumerate(sorted_times):
        counts = modeflag_distributions[idx]
        # Draw the histogram.
        plt.figure()
        plt.bar(bin_edges[:-1], counts, width=np.diff(bin_edges),
                align='edge', edgecolor='black')
        plt.title(f'Time {time}')
        plt.xlabel(modeflag)
        plt.ylabel('Count')
        plt.xlim(min_value, max_value)
        plt.ylim(0, y_max)
        plt.xticks(rotation=45)  # rotate x tick labels to avoid overlap
        plt.ticklabel_format(style='plain', axis='x')  # no scientific notation
        image_path = os.path.join(output_path, f'frame_{time}.png')
        plt.savefig(image_path, bbox_inches='tight')  # tight margins fit all labels
        plt.close()
        gif_frames.append(image_path)

    # Build the GIF from the saved frames.
    images = [Image.open(frame) for frame in gif_frames]
    gif_output_path = os.path.join(
        analysis_path, f'{modeflag}_111distribution_{note}.gif')
    images[0].save(
        gif_output_path,
        save_all=True,
        append_images=images[1:],
        duration=1500,  # duration per frame (ms)
        loop=10
    )

    # Build the heatmap.
    # Convert modeflag_distributions to a numpy array.
    modeflag_distributions = np.array(modeflag_distributions)

    plt.figure(figsize=(10, 8))
    plt.imshow(modeflag_distributions, aspect='auto',
               origin='lower', cmap='hot')
    plt.colorbar(label='Frequency')
    plt.xlabel(modeflag)
    plt.ylabel('Time Step')
    plt.title(f'{modeflag} Distribution Over Time')
    # TODO: map the x ticks onto [min_value, max_value] in scientific notation.
    # Map y ticks to the actual timesteps.
    plt.yticks(range(len(timesteps)), timesteps)
    heatmap_path = os.path.join(
        analysis_path, f"{modeflag}_heatmap_{note}.png")
    plt.savefig(heatmap_path, bbox_inches='tight')  # tight margins fit all labels
    plt.close()

    return gif_frames, gif_output_path, heatmap_path

# graph_series_To_strain


def find_differing_fields(target_network, batch_list):
    """
    Find the fields of ``target_network`` that differ from all the other
    network names in ``batch_list``, returned as an underscore-joined string.

    Fix: this text used to sit mid-function as a no-op string expression; it
    is now the actual docstring.

    Args:
        target_network (str): target network name (may be a path).
        batch_list (list of str): network names/paths to compare against.

    Returns:
        str: differing fields joined by '_', e.g. 'N4_k0.5'.
    """
    import re
    from collections import defaultdict

    def parse_network_name(network_name):
        """
        Split a name like 'bondingprob0.9_N4' into {'bondingprob': '0.9', 'N': '4'}.

        Fields that do not match the letters-then-number pattern are kept
        whole, with a value of None.
        """
        # Letters followed by a numeric tail (digits, dots, minus signs).
        pattern = re.compile(r'([a-zA-Z]+)([0-9\.\-]+)')
        field_dict = {}
        for field in network_name.split('_'):
            match = pattern.match(field)
            if match:
                key, value = match.groups()
                field_dict[key] = value
            else:
                field_dict[field] = None
        return field_dict

    def extract_network_code(network_path):
        """Return the last path component, e.g. '/a/b/N4' -> 'N4'."""
        return network_path.split('/')[-1]

    # Parse every candidate name.
    parsed_networks = [parse_network_name(
        extract_network_code(name)) for name in batch_list]

    # Parse the target name.
    target_fields = parse_network_name(extract_network_code(target_network))

    # Collect, per field, every value seen in the OTHER networks.
    field_values = defaultdict(set)
    for name, fields in zip(batch_list, parsed_networks):
        if name == target_network:
            continue  # skip the target itself
        for key, value in fields.items():
            field_values[key].add(value)

    # A target field differs when it is absent elsewhere or has a value no
    # other network shares (the two original branches appended identically).
    differing_fields = []
    for key, value in target_fields.items():
        if key not in field_values or value not in field_values[key]:
            differing_fields.append(
                f"{key}{value}" if value is not None else key)

    # Join the differing fields into a single string.
    return '_'.join(differing_fields)


def find_broken_frame(universe, bins=50, save_hist=False):
    """
    Decide, from the histogram of per-atom x coordinates, whether the sample
    breaks during the trajectory, and locate the first broken frame:
      - a frame is "good" (unbroken) when every histogram bin is non-zero;
      - it is "broken" when at least one bin is empty.

    Strategy:
      1. If the last frame is good, the sample never broke.
      2. Otherwise binary-search the trajectory for the first broken frame.
         NOTE(review): the binary search assumes frames are good before the
         break and broken afterwards (monotonic) — confirm for flickering
         trajectories.

    Parameters
    ----------
    universe : MDAnalysis.Universe
        MDAnalysis Universe object.
    bins : int, optional
        Number of histogram bins, default 50.
    save_hist : bool, optional
        Whether to save a histogram PNG per analyzed frame, default False.

    Returns
    -------
    tuple
        (first_broken_frame_index, total_frame_count, break_location_ratio)
        - first_broken_frame_index: index of the first broken frame.
          NOTE(review): the function returns ``low + 1`` — effectively a
          1-based index; callers (e.g. map_bond_xyz2graph_series) subtract 1
          before indexing the trajectory.  When nothing breaks it returns
          ``n_frames`` — confirm this off-by-one convention is intended.
        - total_frame_count: number of frames in the trajectory.
        - break_location_ratio: break position as a fraction (0-1) of the
          box length along x, or None when the trajectory never breaks.
    """
    n_frames = len(universe.trajectory)

    def analyze_frame(frame_idx, save_hist=False):
        """Return (is_good, break_location_ratio) for a single frame."""
        universe.trajectory[frame_idx]
        x_coords = universe.atoms.positions[:, 0]

        # Box length along x for this frame.
        box_x = universe.trajectory[frame_idx].dimensions[0]
        x_range = (0, box_x)

        # Histogram of atom x positions across the box.
        hist, bin_edges = np.histogram(x_coords, bins=bins, range=x_range)

        # Center of each bin.
        bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2

        # Indices of the empty bins.
        zero_bins = np.where(hist == 0)[0]

        if len(zero_bins) == 0:
            return True, None

        # Break position = mean center of the empty bins ...
        break_location = np.mean(bin_centers[zero_bins])
        # ... expressed as a fraction of the box length.
        break_location_ratio = break_location / box_x

        if save_hist:
            plt.figure()
            plt.hist(x_coords, bins=bins, range=x_range, edgecolor='black')
            plt.axvline(x=break_location, color='r',
                        linestyle='--', label='Break Location')
            plt.xlabel('x_coords')
            plt.ylabel('Frequency')
            plt.title(f'Frame {frame_idx} - x_coords distribution')
            plt.xlim(x_range)
            plt.legend()
            plt.savefig(f'posi_x_hist_frame_{frame_idx}.png')
            plt.close()

        return False, break_location_ratio

    # If the last frame is good, the trajectory never broke.
    is_good_last, _ = analyze_frame(n_frames - 1)
    if is_good_last:
        return n_frames, n_frames, None

    # Binary search for the first broken frame.
    low, high = 0, n_frames - 1
    break_location_ratio = None
    while low <= high:
        mid = (low + high) // 2
        is_good, location_ratio = analyze_frame(mid)
        if is_good:
            low = mid + 1
        else:
            break_location_ratio = location_ratio  # last observed break ratio
            high = mid - 1
    # Move the trajectory to frame `low` (the first broken frame).
    universe.trajectory[low]
    # timestep = int(universe.trajectory.time)
    return low + 1, n_frames, break_location_ratio

# Temporary inspection helpers.


def check_edge_modeflag_changes(graph_series, sorted_times, modeflag):
    """
    Track the value of edge attribute ``modeflag`` through the time series
    and report the edges whose value changes.

    :param graph_series: {time: graph} mapping.
    :param sorted_times: list of times, in the order to inspect.
    :param modeflag: edge-attribute key to read (missing values count as 0).
    :return: {(u, v): [value_at_t1, value_at_t2, ...]} for changed edges only.
    """
    history = {}

    # Record, per (u, v) edge, the attribute value at every time point.
    # NOTE(review): for a MultiGraph the (u, v) key would merge parallel
    # edges — confirm these graphs are simple here.
    for t in sorted_times:
        for u, v, data in graph_series[t].edges(data=True):
            history.setdefault((u, v), []).append(data.get(modeflag, 0))

    # Keep only the edges whose recorded values are not all identical.
    return {edge: values for edge, values in history.items()
            if len(set(values)) > 1}


def all_atomlist(graph):
    """Return the de-duplicated ids of every atom appearing either as a node
    or inside any edge's 'atomlist' attribute."""
    collected = list(graph.nodes())
    collected.extend(
        atom
        for _u, _v, _key, edge_data in graph.edges(data=True, keys=True)
        for atom in edge_data['atomlist'])
    return list(set(collected))

# 2025-02-16


def breaking_points_in_multiple_tasks(multi_massion_space):
    """
    Walk every folder under ``multi_massion_space``, build an MDAnalysis
    Universe for each .xyz file found, and record the relative breaking
    frame (break frame index / total frames) via find_broken_frame.

    :param multi_massion_space: root directory containing the task folders.
    :return: (list of break-frame ratios, their mean or None when empty).
    """
    broken_frames = []

    for root, _dirs, files in os.walk(multi_massion_space):
        for file in files:
            if not file.endswith(".xyz"):
                continue
            xyz_path = os.path.join(root, file)
            try:
                # Build the Universe from the dump file; format per our data.
                universe = mda.Universe(xyz_path, format='LAMMPSDUMP')

                # Relative position of the breaking frame in the trajectory.
                frame_idx, n_frames, _ = find_broken_frame(universe)
                broken_frames.append(frame_idx / n_frames)
            except Exception as e:
                # Best-effort scan: report the failure and keep going.
                print(f"处理文件 {xyz_path} 时出错: {e}")

    # Mean ratio over all successfully processed tasks.
    avg_broken_frame = np.mean(broken_frames) if broken_frames else None

    return broken_frames, avg_broken_frame


# 2025-02-21


def update_GEBC_in_graph(main_graph, sub_graph, GEBC_modeflag='GEBC'):
    """
    Copy a GEBC-like edge attribute from ``sub_graph`` into ``main_graph``.

    The attribute key is discovered from the first edge of sub_graph: the
    first key containing ``GEBC_modeflag`` as a substring is used.

    :param main_graph: multigraph updated in place.
    :param sub_graph: multigraph holding the freshly computed values.
    :param GEBC_modeflag: substring identifying the attribute key.
    :raises ValueError: when no matching attribute key exists.
    """
    # 1. Inspect the first edge of sub_graph to discover the attribute key.
    _, _, attributes = list(sub_graph.edges(data=True))[0]
    flag = next((key for key in attributes if GEBC_modeflag in key), None)

    if not flag:
        raise ValueError(
            f"No {GEBC_modeflag} key found in the attributes of the edges.")

    # Mirror the attribute edge by edge (multigraph key by multigraph key).
    for u, v in main_graph.edges():
        for key in sub_graph[u][v]:
            value = sub_graph[u][v][key].get(flag, None)
            main_graph[u][v][key][flag] = value


def process_GEBC_write(task):
    """
    Worker for a single GEBC task.

    :param task: tuple (timestep, func, graph, k_prop), where func is the
        writer to execute (nm.GEBC_write_save or nm.GEBC_m1_write_save) and
        modifies the graph in place.
    :return: (timestep, func.__name__, graph) with the modified graph.
    """
    timestep, writer, graph, k_prop = task
    writer(graph, k_prop=k_prop)
    return timestep, writer.__name__, graph


def pkl_make(mission_dir_path, frame_num=50, k_prop=None, save_path='', note=''):
    """
    Build the per-frame graph series for one task directory, compute the
    GEBC and GEBC_m1 edge attributes in parallel, and pickle the result.

    :param mission_dir_path: directory containing the .map, .xyz and
        out-bonds.dat input files.
    :param frame_num: not used in this function — TODO confirm (sampling
        appears to have been removed).
    :param k_prop: forwarded to the GEBC writer functions.
    :param save_path: output directory for frame_series.pkl and log.txt;
        when empty the pkl is written next to the inputs.
        NOTE(review): with an empty save_path, log.txt lands in the current
        working directory — confirm intended.
    :param note: not used in this function.
    :raises FileNotFoundError: when a required input file is missing.
    """
    # 2025-02-21
    # Uses the ProcessPoolExecutor approach.
    # To load the generated pkl file:
    # with open('frame_series.pkl', 'rb') as file:
    #     frame_series = pickle.load(file)

    start_time = time.time()
    # Locate the required input files in the task directory.
    map_path = None
    xyz_path = None
    bond_path = None
    for file_name in os.listdir(mission_dir_path):
        if file_name.endswith('.map'):
            map_path = os.path.join(mission_dir_path, file_name)
        elif file_name.endswith('.xyz'):
            xyz_path = os.path.join(mission_dir_path, file_name)
        elif file_name.endswith('out-bonds.dat'):
            bond_path = os.path.join(mission_dir_path, file_name)

    if map_path is None:
        raise FileNotFoundError("指定路径中未找到 .map 文件.")
    if xyz_path is None:
        raise FileNotFoundError("指定路径中未找到 .xyz 文件.")
    if bond_path is None:
        raise FileNotFoundError("指定路径中未找到 out-bonds.dat 文件.")

    # Build the graph series from the input files.

    frame_series = map_bond_xyz2graph_series(map_path, bond_path, xyz_path)
    timestep_list = sorted(frame_series.keys())

    # Task list: two tasks per timestep (GEBC_write_save and GEBC_m1_write_save).
    tasks = []
    for timestep in timestep_list:
        graph = frame_series[timestep]['graph']
        tasks.append((timestep, nm.GEBC_write_save, graph, k_prop))
        tasks.append((timestep, nm.GEBC_m1_write_save, graph, k_prop))

    # Run all tasks in a process pool (worker count capped at 4).
    #! with ProcessPoolExecutor(max_workers=os.cpu_count()) as executor:
    with ProcessPoolExecutor(max_workers=4) as executor:
        print(f'Max CPU number: {os.cpu_count()}')
        futures = {executor.submit(
            process_GEBC_write, task): task for task in tasks}

        for future in as_completed(futures):
            task = futures[future]
            try:
                timestep, func_name, modified_graph = future.result()
                # Merge the worker's result back into the main-process graph
                # (workers operate on pickled copies).
                update_GEBC_in_graph(
                    frame_series[timestep]['graph'], modified_graph)

                # print(f"时间步 {timestep} 的 {func_name} 完成.")
            except Exception as exc:
                print(f"处理时间步 {task[0]} 函数 {task[1].__name__} 时出错: {exc}")

    # Pickle the finished frame_series.
    if save_path:
        # Create save_path if it does not exist yet.
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        pkl_path = os.path.join(save_path, f'frame_series.pkl')
    else:
        pkl_path = os.path.join(mission_dir_path, f'frame_series.pkl')
    with open(pkl_path, 'wb') as file:
        pickle.dump(frame_series, file)
        print(f'生成 pkl 文件在 {pkl_path}')

    end_time = time.time()
    mda_duration = end_time - start_time
    time_info = f"mda_timesteps 方法用时: {mda_duration:.4f} 秒"
    print(time_info)

    # Append the timing info to a log file under save_path.
    log_file_path = os.path.join(save_path, "log.txt")

    with open(log_file_path, "a") as log_file:
        log_file.write(time_info + "\n")


def pkl_make_2(mission_dir_path, k_prop=None, save_path='', note=''):
    """Build a graph series from a mission directory, compute GEBC and
    GEBC_m1 for every frame with process pools, and pickle the result.

    Compared with pkl_make_1 (2025-04-19): GEBC runs on a small process
    pool and GEBC_m1 on a large one.  All frames are kept — no sampling
    (2025-04-15).

    Parameters
    ----------
    mission_dir_path : str
        Directory expected to contain a ``.map`` file, a ``.xyz`` file and
        an ``out-bonds.dat`` file.
    k_prop : optional
        Forwarded to ``nm.GEBC_write_save`` / ``nm.GEBC_m1_write_save``.
    save_path : str
        Output directory for ``frame_series.pkl`` and ``log.txt``; when
        empty, both fall back to *mission_dir_path*.
    note : str
        Unused; kept for interface compatibility.

    Raises
    ------
    FileNotFoundError
        If any of the three required input files is missing.
    """
    start_time = time.time()

    # Locate the three required input files in the mission directory.
    map_path = None
    xyz_path = None
    bond_path = None
    for file_name in os.listdir(mission_dir_path):
        if file_name.endswith('.map'):
            map_path = os.path.join(mission_dir_path, file_name)
        elif file_name.endswith('.xyz'):
            xyz_path = os.path.join(mission_dir_path, file_name)
        elif file_name.endswith('out-bonds.dat'):
            bond_path = os.path.join(mission_dir_path, file_name)

    if map_path is None:
        raise FileNotFoundError("指定路径中未找到 .map 文件.")
    if xyz_path is None:
        raise FileNotFoundError("指定路径中未找到 .xyz 文件.")
    if bond_path is None:
        raise FileNotFoundError("指定路径中未找到 out-bonds.dat 文件.")

    # Build the per-frame graph series from the input files.
    frame_series = map_bond_xyz2graph_series(map_path, bond_path, xyz_path)
    timestep_list = sorted(frame_series.keys())

    # Two task batches: GEBC_write_save on a small pool,
    # GEBC_m1_write_save on a large pool.
    # (Fixed: the old comments mislabelled the first batch as "serial".)
    tasks_gebc = []
    tasks_gebc_m1 = []
    for timestep in timestep_list:
        graph = frame_series[timestep]['graph']
        tasks_gebc.append((timestep, nm.GEBC_write_save, graph, k_prop))
        tasks_gebc_m1.append((timestep, nm.GEBC_m1_write_save, graph, k_prop))

    def _run_pool(tasks, max_workers, label):
        # Run one batch of GEBC tasks in a process pool and merge the
        # workers' results back into the main process's frame_series.
        # Returns the elapsed wall-clock time in seconds.
        t0 = time.time()
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            print(f"{label} 使用 {max_workers} 核心进行并行计算")
            futures = {executor.submit(process_GEBC_write, task): task
                       for task in tasks}
            for future in as_completed(futures):
                task = futures[future]
                try:
                    timestep, func_name, modified_graph = future.result()
                    update_GEBC_in_graph(
                        frame_series[timestep]['graph'], modified_graph)
                except Exception as exc:
                    print(f"处理时间步 {task[0]} 函数 {task[1].__name__} 时出错: {exc}")
        return time.time() - t0

    gebc_duration = _run_pool(tasks_gebc, 4, "GEBC")
    # Fixed: this message used to say "串行" although a pool was used.
    print(f"GEBC_write_save 并行计算用时: {gebc_duration:.4f} 秒")

    # Cap the large pool at the machine's core count (was a bare 90);
    # os.cpu_count() may return None, in which case keep the old value.
    m1_workers = min(90, os.cpu_count() or 90)
    m1_duration = _run_pool(tasks_gebc_m1, m1_workers, "GEBC_m1")
    print(f"GEBC_m1_write_save 并行计算用时: {m1_duration:.4f} 秒")

    # Resolve the output directory once so the pkl and the log end up in
    # the same place.  (Bug fix: an empty save_path used to send the pkl
    # to mission_dir_path but log.txt to the current working directory.)
    if save_path:
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        out_dir = save_path
    else:
        out_dir = mission_dir_path

    pkl_path = os.path.join(out_dir, 'frame_series.pkl')
    with open(pkl_path, 'wb') as file:
        pickle.dump(frame_series, file)
        print(f'生成 pkl 文件在 {pkl_path}')

    end_time = time.time()
    mda_duration = end_time - start_time
    time_info = f"mda_timesteps 方法用时: {mda_duration:.4f} 秒"
    print(time_info)

    # Append the timing summary to a log file next to the pkl.
    log_file_path = os.path.join(out_dir, "log.txt")
    with open(log_file_path, "a") as log_file:
        log_file.write(time_info + "\n")


def process_GEBC_m1_timed(task):
    """Worker: run ``nm.GEBC_m1_write_save`` on one frame's graph and time it.

    Parameters
    ----------
    task : tuple
        ``(timestep, graph, k_prop)`` — plain data only (no function
        objects), so the task pickles cleanly into a worker process.

    Returns
    -------
    tuple
        ``(timestep, graph, elapsed_seconds)`` where *graph* is the input
        graph after GEBC_m1 results have been written into it.
    """
    timestep, graph, k_prop = task
    t0 = time.time()
    # GEBC_m1_write_save is expected to write its results into `graph`
    # in place — TODO confirm against GraphTools.network_method; its
    # return value was never used by callers, so it is discarded here.
    # NOTE(review): the old `assert graph_initial != graph` compared object
    # identity (nx graphs define no __eq__), so it always passed against a
    # fresh copy and verified nothing while paying for a full graph copy —
    # removed along with the copy.
    nm.GEBC_m1_write_save(graph, k_prop)
    elapsed = time.time() - t0
    return timestep, graph, elapsed


def pkl_parallel_gebc_m1(frame_series_pkl_path,
                         k_prop=None,
                         num_workers=None,
                         output_dir=None):
    """Recompute GEBC_m1 for every frame of an existing frame_series pickle
    in parallel, update the graphs in place, and write a new pickle.

    Parameters
    ----------
    frame_series_pkl_path : str
        Path to a pickled ``frame_series`` dict
        (``timestep -> {'graph': ...}``).
    k_prop : optional
        Forwarded to ``nm.GEBC_m1_write_save`` via the worker.
    num_workers : int, optional
        Process count; defaults to the machine's CPU count and is clamped
        to the number of frames, but never below 1.
    output_dir : str, optional
        Directory for the updated pickle (same basename); defaults to a
        ``*_gebc_m1`` sibling of the input file.

    Returns
    -------
    str
        Path of the pickle that was written.
    """
    # 1. Load the existing frame_series.
    #    NOTE: pickle.load can execute arbitrary code — only feed it
    #    trusted, locally produced files.
    with open(frame_series_pkl_path, 'rb') as f:
        frame_series = pickle.load(f)

    # 2. Build the task list — plain data only, no function objects,
    #    so each task pickles cleanly into the worker processes.
    tasks = [
        (timestep, frame_series[timestep]['graph'], k_prop)
        for timestep in sorted(frame_series.keys())
    ]

    if num_workers is None:
        # os.cpu_count() may return None on exotic platforms.
        num_workers = os.cpu_count() or 1

    # Clamp to the task count, but keep at least one worker so an empty
    # frame_series no longer crashes ProcessPoolExecutor(max_workers=0).
    num_workers = max(1, min(num_workers, len(tasks)))

    print(f"并行计算 GEBC_m1,使用 {num_workers} 核心…")
    t0_all = time.time()

    # 3. Fan the tasks out to the module-level worker.
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        futures = {
            executor.submit(process_GEBC_m1_timed, task): task[0]
            for task in tasks
        }
        for future in as_completed(futures):
            ts = futures[future]
            try:
                timestep, modified_graph, dur = future.result()
                # Merge the worker's GEBC_m1 attributes back into the
                # main process's copy of the graph, then report timing.
                update_GEBC_in_graph(frame_series[timestep]['graph'],
                                     modified_graph, GEBC_modeflag='GEBC_m1')
                print(f"[帧 {timestep}] GEBC_m1 耗时: {dur:.4f} 秒")
            except Exception as e:
                print(f"[错误] 帧 {ts} 计算失败:{e}")

    total_dur = time.time() - t0_all
    print(f"所有帧 GEBC_m1 并行计算完毕,总用时 {total_dur:.2f} 秒")

    # 4. Write the updated frame_series out.
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
        out_path = os.path.join(output_dir,
                                os.path.basename(frame_series_pkl_path))
    else:
        base, ext = os.path.splitext(frame_series_pkl_path)
        out_path = f"{base}_gebc_m1{ext}"

    with open(out_path, 'wb') as f:
        pickle.dump(frame_series, f)
    print(f"更新后的 frame_series 已保存到:{out_path}")

    return out_path


def serialize_u_atom_lists(G):
    """Convert MDAnalysis Atom objects stored in each edge's ``u_atom_list``
    into plain-Python dicts, in place, so the MultiGraph can be pickled.

    Parameters
    ----------
    G : networkx.MultiGraph
        Graph whose edges may carry a ``u_atom_list`` dict attribute
        mapping keys to MDAnalysis ``Atom`` objects.

    Returns
    -------
    networkx.MultiGraph
        The same graph object, mutated in place.

    Raises
    ------
    TypeError
        If *G* is not an ``nx.MultiGraph``.
    """
    from networkx import MultiGraph

    if not isinstance(G, MultiGraph):
        raise TypeError("请传入一个 nx.MultiGraph 对象")

    for u, v, key, data in G.edges(keys=True, data=True):
        atom_list = data.get('u_atom_list')
        # Only dict-shaped u_atom_list attributes are converted;
        # edges without one (or with another shape) are left untouched.
        if not isinstance(atom_list, dict):
            continue

        serialized = {}
        for k, atom in atom_list.items():
            # Duck-typed check for an MDAnalysis Atom-like object.
            if hasattr(atom, 'index') and hasattr(atom, 'position'):
                velocity = getattr(atom, 'velocity', None)
                serialized[k] = {
                    'index':    atom.index,
                    'id':       atom.id,
                    'type':     atom.type,
                    'mass':     atom.mass,
                    'resnum':   atom.resnum,
                    'segid':    atom.segid,
                    'position': atom.position.tolist(),
                    'velocity': (velocity.tolist()
                                 if velocity is not None else None),
                }
            else:
                # BUG FIX: the assignment used to sit outside this guard,
                # so a non-Atom entry raised NameError (first item) or
                # silently reused the previous iteration's dict.  Keep the
                # value unchanged instead (it is already basic data).
                serialized[k] = atom
        data['u_atom_list'] = serialized

    return G
