from scipy.stats import pearsonr
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import os
import pickle
from scipy.stats import pointbiserialr

import GraphTools.graph_method as gm
import GraphTools.analysis as am
import GraphTools.basic_method as bm


def series_broken_point(frame_series):
    """
    Binary-search for the earliest key in ``frame_series`` whose frame
    satisfies ``broken_judge_posi``.

    Assumes the broken predicate is monotone over the sorted keys
    (False ... False True ... True); otherwise the result is undefined.

    :param frame_series: dict mapping timestamps to frame dicts.
    :return: the smallest key whose frame is judged broken, or ``None``
             if no frame satisfies the predicate.
    """
    ordered_keys = sorted(frame_series)
    lo, hi = 0, len(ordered_keys) - 1
    first_broken = None

    while lo <= hi:
        pivot = (lo + hi) // 2
        candidate = ordered_keys[pivot]

        if broken_judge_posi(frame_series[candidate]):
            # Broken here: remember it and keep searching to the left.
            first_broken = candidate
            hi = pivot - 1
        else:
            # Still intact: the first broken frame must lie to the right.
            lo = pivot + 1

    return first_broken


def broken_judge_posi(frame, timesteps=''):
    """
    Decide whether the structure in ``frame`` is broken, based on the
    x-coordinate distribution of its largest connected component.

    A 50-bin histogram of the x positions is built; if any bin is empty
    the sample is considered broken (there is a gap along x).  A
    diagnostic histogram image is saved as a side effect.

    :param frame: dict with key 'graph' holding a networkx graph whose
                  nodes may carry a 'posi' coordinate attribute.
    :param timesteps: label embedded in the saved histogram file name.
    :return: True when broken (some bin empty), False otherwise.
    """
    largest = find_largest_connected_graph(frame['graph'])

    # x coordinate of every node that carries a position attribute.
    xs = [attrs['posi'][0]
          for _, attrs in largest.nodes(data=True) if 'posi' in attrs]

    n_bins = 50
    counts, _ = np.histogram(xs, bins=n_bins)

    # Save a diagnostic histogram of the x positions.
    plt.hist(xs, bins=n_bins, edgecolor='black')
    plt.xlabel('x_coords')
    plt.ylabel('Frequency')
    plt.title('Histogram of x_coords')
    plt.savefig(f'posi_x_hist_,timesteps={timesteps}.png')
    plt.close()

    # An empty bin means a gap in the x distribution -> broken.
    return bool((counts == 0).any())


def broken_judge_topo(frame):
    """
    Topology-based break detection.

    Find the largest connected subgraph, pick ``div_num`` nodes whose x
    coordinates are closest to evenly spaced partition points, then walk
    the shortest path between consecutive picks and judge the "direction"
    of each segment under periodic boundary conditions.

    NOTE(review): the original description mentioned 5 partitions but the
    code uses div_num = 10 — confirm which is intended.

    :param frame: dict with keys 'graph' (networkx graph whose nodes carry
                  a 'posi' coordinate attribute) and 'BOX_BOUNDS' (list of
                  "lower upper" strings, one per axis).
    :return: True as soon as any segment is judged broken, else False.
    """

    # -------------------------
    # 0. Parse BOX_BOUNDS
    # -------------------------
    # BOX_BOUNDS example:
    #   [
    #     '2.6864716523783412e+00 3.9313528347622452e+01',   # x bounds
    #     '2.6864716523783412e+00 3.9313528347622452e+01',   # y bounds
    #     '2.6864716523783412e+00 3.9313528347622452e+01'    # z bounds
    #   ]
    # Each entry is a "lower_bound upper_bound" string; only the first
    # (x direction) entry is used here.
    BOX_BOUNDS = frame['BOX_BOUNDS']
    graph = frame['graph']
    box_lower_x, box_upper_x = parse_box_bounds(BOX_BOUNDS[0])
    # parse_box_bounds (defined below in this module) splits the string
    # and converts both tokens to float.

    # -------------------------
    # 1. Largest connected subgraph (induced on its node set).
    G = find_largest_connected_graph(graph)

    # -------------------------
    # 2. All node x coordinates, then the div_num nodes closest to the
    #    evenly spaced partition points (periodic box).
    x_coords = get_x_coordinate(G)  # sorted (node_id, x) tuples
    div_num = 10
    div_points = find_closest_points(
        x_coords, box_lower_x, box_upper_x, div_num)

    # -------------------------
    # 3. Shortest path between consecutive partition nodes; judge the
    #    direction of each segment.
    # -------------------------
    # Example: div_points = [p0, p1, ..., p9] -> judge p0->p1, p1->p2, ...
    # Return True at the first segment judged broken; False if all pass.
    #
    # NOTE(review): direction_judge returns a summed displacement (float)
    # which is used here as a truthiness flag — any nonzero net
    # displacement counts as broken.  Confirm this is intended.

    for i in range(div_num - 1):
        source_node = div_points[i][0]
        target_node = div_points[i+1][0]

        # Shortest path on the largest component.
        path = nx.shortest_path(G, source=source_node, target=target_node)

        # x positions along the path; judge direction with PBC wrapping.
        div_node_posix = [G.nodes[node]['posi'][0] for node in path]
        broken_flag = direction_judge(
            div_node_posix, box_upper_x - box_lower_x)

        if broken_flag:
            return True

    # All segments passed: not broken.
    return False


def direction_judge(div_node_posix, box_len):
    """
    Sum the consecutive position differences along a path, unwrapping
    each step across periodic boundaries (minimum-image convention).

    Args:
        div_node_posix (list[float]): positions along the path.
        box_len (float): periodic box length.

    Returns:
        float: net displacement (sum of PBC-corrected differences),
        or 0 when fewer than two positions are given.
    """
    if not div_node_posix or len(div_node_posix) < 2:
        # Nothing to difference.
        return 0

    half_box = box_len / 2
    total = 0
    for earlier, later in zip(div_node_posix, div_node_posix[1:]):
        step = later - earlier
        # Fold any jump larger than half a box back into the box.
        if step > half_box:
            step -= box_len
        elif step < -half_box:
            step += box_len
        total += step

    return total


def direction_judge_old(div_node_posix):
    """
    Judge whether a path runs mostly "backwards".

    Compares each position with its predecessor and computes the
    fraction of increasing steps; the path is flagged when fewer than
    20% of the steps increase.

    Args:
        div_node_posix (list): numeric positions along the path.

    Returns:
        bool: True when the increasing-step ratio is below 0.2.

    Raises:
        ValueError: if fewer than two positions are supplied.
    """
    if not div_node_posix or len(div_node_posix) < 2:
        raise ValueError("div_node_posix 必须包含至少两个点.")

    # One boolean per consecutive pair: did the position increase?
    increases = [later > earlier
                 for earlier, later in zip(div_node_posix,
                                           div_node_posix[1:])]

    rising = sum(increases)
    steps = len(increases)
    rising_ratio = rising / steps if steps > 0 else 0

    return rising_ratio < 0.2


def plot_second_element_distribution(data):
    """
    Plot the distribution of the second element of each tuple.

    The histogram is written to 'test.png' in the working directory.

    Args:
        data (list of tuple): tuples with at least two elements; only
            the element at index 1 is used.
    """
    # Pull out the second value of every tuple.
    second_elements = [item[1] for item in data]

    # Draw and save the histogram.
    plt.figure(figsize=(10, 6))
    plt.hist(second_elements, bins=20, edgecolor='black', alpha=0.7)
    plt.title('Distribution of Second Elements', fontsize=16)
    plt.xlabel('Second Element Value', fontsize=14)
    plt.ylabel('Frequency', fontsize=14)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.savefig('test.png')
    # Close the figure so repeated calls do not leak matplotlib figures
    # (the original left every figure open).
    plt.close()


def find_closest_points(x_coords, box_lower_x, box_upper_x, div_num):
    """
    Pick, for each of ``div_num`` evenly spaced target positions, the
    entry of ``x_coords`` whose x value is nearest to that target.

    Targets start at 0 and are spaced (box_upper_x - box_lower_x) /
    div_num apart, i.e. positions are assumed to be measured from 0.

    Args:
        x_coords: list of (node_id, x_position) tuples.
        box_lower_x: float, lower x bound of the box.
        box_upper_x: float, upper x bound of the box.
        div_num: int, number of partitions.

    Returns:
        list: div_num (node_id, x_position) tuples, one per target.
    """
    spacing = (box_upper_x - box_lower_x) / div_num
    # Targets 0, dx, 2*dx, ... — div_num of them in total.
    targets = [k * spacing for k in range(div_num)]

    return [min(x_coords, key=lambda entry: abs(entry[1] - target))
            for target in targets]


def parse_box_bounds(bound_str):
    """Parse a "lower upper" bound string into a (float, float) tuple."""
    lower, upper = (float(token) for token in bound_str.split())
    return lower, upper


def find_largest_connected_graph(graph):
    """
    Return a copy of the largest connected component of ``graph``.

    Args:
        graph (networkx.Graph or MultiGraph): input graph.

    Returns:
        Same graph type: an independent copy of the subgraph induced by
        the component with the most nodes.
    """
    # Pick the component with the most nodes...
    biggest = max(nx.connected_components(graph), key=len)
    # ...and materialise it as a standalone graph.
    return graph.subgraph(biggest).copy()


def induced_subgraph(graph, nodes):
    """
    Return the subgraph of ``graph`` induced by ``nodes``.

    Placeholder: not implemented yet (currently returns None); the
    concrete implementation depends on the graph structure in use.
    """
    # Implementation depends on the concrete graph representation.
    pass


def get_x_coordinate(G):
    """
    Collect the x coordinate of every node carrying a 'posi' attribute.

    Args:
        G: networkx graph whose node data may include 'posi', a
           coordinate tuple whose first entry is x.

    Returns:
        list: (node_id, x_position) tuples sorted by ascending x.
    """
    pairs = [(node_id, attrs['posi'][0])
             for node_id, attrs in G.nodes(data=True)
             if 'posi' in attrs]
    pairs.sort(key=lambda pair: pair[1])
    return pairs


def get_sp_directions_in_interval(G, x_left, x_right):
    """
    Find the "sp" entities inside x ∈ [x_left, x_right) and report
    their directions (+1 / -1).

    Placeholder: the definition of an "sp" and of its direction is not
    fixed yet, so no candidates are ever produced.

    Returns:
        list: direction values; currently always empty.
    """
    directions = []
    return directions


def average_direction(d_list):
    """
    Collapse a list of direction values into a single vote.

    Returns +1 when the values sum positive, -1 when negative, 0 on a
    tie.  An empty list defaults to +1 (no "sp" found).
    """
    if not d_list:
        # No samples: fall back to the positive default.
        return 1
    total = sum(d_list)
    if total == 0:
        return 0
    return 1 if total > 0 else -1


# ------------------ Key analysis functions ------------------


def graph_series_to_bond_strains_serise(graph_series):
    """
    Build the per-frame x-strain information for a graph time series.

    For every time step after the first, computes the per-bond x strain
    relative to the initial frame via ``gm.plot_x_strain_hist`` (which
    also writes a histogram image per time step as a side effect).

    Args:
        graph_series (dict): time -> frame data; the first (sorted)
            entry is used as the reference state.

    Returns:
        tuple: (bond_strains_serise, timestep_list) where
            bond_strains_serise maps each later time step to a
            (x_strains_dict, total_strain_ratio) pair, and
            timestep_list is every time stamp in ascending order.
    """
    # (Removed the unused `note` and `time_total_strain_ratio` locals
    # from the original — they were built but never read or returned.)
    timestep_list = sorted(graph_series.keys())
    graph_initial = graph_series[timestep_list[0]]
    bond_strains_serise = {}
    for timestep in timestep_list[1:]:
        graph_finale = graph_series[timestep]
        x_strains_dict, total_strain_ratio = gm.plot_x_strain_hist(
            graph_initial, graph_finale, note=f'_timestep={timestep}')
        bond_strains_serise[timestep] = (x_strains_dict, total_strain_ratio)
    return bond_strains_serise, timestep_list

def calculate_initial_correlations(graph_series, mission_dir):
    """
    Correlate the *initial* frame's edge GEBC / GEBC_m1 values with the
    per-bond x strain observed at every later frame.

    For each later time step, the Pearson correlation (and p-value)
    between the initial GEBC values of the edges still present and
    their current strain is computed, plotted against macroscopic
    strain, and pickled under ``mission_dir/analysis_correlations``.

    Args:
        graph_series (dict): time -> frame dict with a 'graph' entry.
        mission_dir (str): output directory root.

    Returns:
        tuple: (correlations_GEBC, correlations_GEBC_m1,
                pvalues_GEBC, pvalues_GEBC_m1), one entry per later
                time step (np.nan where fewer than two samples exist).
    """
    bond_strains_serise, timestep_list = graph_series_to_bond_strains_serise(
        graph_series)
    # Initial graph and its per-edge GEBC / GEBC_m1 values.
    graph_initial = graph_series[timestep_list[0]]['graph']
    GEBC_initial = {}
    GEBC_m1_initial = {}

    for u, v, key, data in graph_initial.edges(keys=True, data=True):
        edge_id = (u, v, key)
        GEBC_initial[edge_id] = data.get('GEBC', 0)
        GEBC_m1_initial[edge_id] = data.get('GEBC_m1', 0)

    correlations_GEBC = []
    correlations_GEBC_m1 = []
    pvalues_GEBC = []
    pvalues_GEBC_m1 = []

    macro_strain = [0]
    # Pearson correlation and p-value for every later frame.
    for time in timestep_list[1:]:
        x_strains_dict, _ = bond_strains_serise[time]
        graph_current = graph_series[time]['graph']

        # NOTE(review): indexes graph_series with the literal key 0, not
        # timestep_list[0] — confirm time stamps always start at 0.
        macro_strain.append(am.get_strain_x(
            graph_series[0], graph_series[time]))

        current_edges = set((u, v, k)
                            for u, v, k in graph_current.edges(keys=True))

        common_edges = current_edges
        GEBC_values = [GEBC_initial[edge] for edge in common_edges]
        GEBC_m1_values_initial = [GEBC_m1_initial[edge]
                                  for edge in common_edges]
        strain_values = [x_strains_dict[edge] for edge in common_edges]

        # GEBC vs strain: correlation and p-value.
        if len(GEBC_values) > 1:
            corr_GEBC, pval_GEBC = pearsonr(GEBC_values, strain_values)
            correlations_GEBC.append(corr_GEBC)
            pvalues_GEBC.append(pval_GEBC)
        else:
            correlations_GEBC.append(np.nan)
            pvalues_GEBC.append(np.nan)

        # GEBC_m1 vs strain: correlation and p-value.
        if len(GEBC_m1_values_initial) > 1:
            corr_GEBC_m1, pval_GEBC_m1 = pearsonr(
                GEBC_m1_values_initial, strain_values)
            correlations_GEBC_m1.append(corr_GEBC_m1)
            pvalues_GEBC_m1.append(pval_GEBC_m1)
        else:
            correlations_GEBC_m1.append(np.nan)
            pvalues_GEBC_m1.append(np.nan)

    # Plot correlations (top) and p-values (bottom) in two subplots.
    save_path = os.path.join(mission_dir, 'analysis_correlations')
    os.makedirs(save_path, exist_ok=True)

    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 10), sharex=True)

    # Top subplot: correlation coefficients.
    ax1.plot(macro_strain[1:], correlations_GEBC,
             label='Correlation: GEBC (initial)')
    ax1.plot(macro_strain[1:], correlations_GEBC_m1,
             label='Correlation: GEBC_m1 (initial)')
    ax1.axhline(0, color='red',  linewidth=0.8, )  # thin zero reference line
    ax1.set_ylabel('Pearson Correlation Coefficient')
    ax1.set_title('Correlation between initial GEBCs and Strain Over Time')
    ax1.legend(loc='upper right')
    ax1.grid(True)

    # Bottom subplot: p-values.
    ax2.plot(macro_strain[1:], pvalues_GEBC,
             linestyle='--', label='P-value: GEBC')
    ax2.plot(macro_strain[1:], pvalues_GEBC_m1,
             linestyle='--', label='P-value: GEBC_m1')
    ax2.axhline(0.05, color='red', linewidth=0.8,
                label='Statistical Significance = 0.05')  # significance line
    ax2.set_xlabel('Strain')
    ax2.set_ylabel('P-value')
    ax2.legend(loc='upper right')
    ax2.grid(True)

    plt.tight_layout()
    plt.savefig(os.path.join(
        save_path, 'Correlation_and_Pvalue_between_initial_GEBCs_and_Strain.png'))
    # Close the figure so repeated calls do not leak matplotlib figures
    # (the original never closed it).
    plt.close(fig)

    # Persist the raw data next to the plot.
    pkl_path = os.path.join(
        save_path, 'Correlation_and_Pvalue_between_initial_GEBCs_and_Strain.pkl')
    with open(pkl_path, 'wb') as f:
        pickle.dump({
            'GEBC_initial': GEBC_initial,
            'GEBC_m1_initial': GEBC_m1_initial,
            'correlations_GEBC': correlations_GEBC,
            'correlations_GEBC_m1': correlations_GEBC_m1,
            'pvalues_GEBC': pvalues_GEBC,
            'pvalues_GEBC_m1': pvalues_GEBC_m1
        }, f)

    return correlations_GEBC, correlations_GEBC_m1, pvalues_GEBC, pvalues_GEBC_m1


def calculate_initial_correlations_broken(graph_series, mission_dir):
    """
    Point-biserial correlation between the initial frame's per-edge
    GEBC / GEBC_m1 values and bond breakage per frame interval.

    Frames in which no bond breaks are skipped entirely (their strain is
    not recorded either).  Results are plotted against macroscopic strain
    and pickled under ``mission_dir/analysis_correlations``.

    :param graph_series: dict, time -> frame dict with a 'graph' entry.
    :param mission_dir: str, output directory root.
    :return: (correlations_GEBC, correlations_GEBC_m1, pvalues_GEBC,
        pvalues_GEBC_m1), one entry per frame with broken bonds
        (np.nan where the sample size is too small).
    """
    # All time stamps in ascending order
    timestep_list = sorted(graph_series.keys())

    # Pre-compute the macroscopic strain of every frame
    strain_dict = compute_strain_dict(graph_series)

    # Initial graph and its per-edge GEBC / GEBC_m1 values
    graph_initial = graph_series[timestep_list[0]]['graph']
    GEBC_initial = {}
    GEBC_m1_initial = {}
    for u, v, key, data in graph_initial.edges(keys=True, data=True):
        edge_id = (u, v, key)
        GEBC_initial[edge_id] = data.get('GEBC', 0)
        GEBC_m1_initial[edge_id] = data.get('GEBC_m1', 0)

    correlations_GEBC = []
    correlations_GEBC_m1 = []
    pvalues_GEBC = []
    pvalues_GEBC_m1 = []
    macro_strain = []  # only frames that were actually evaluated

    # Frame-by-frame evaluation
    for i in range(1, len(timestep_list)):
        prev_time = timestep_list[i - 1]
        curr_time = timestep_list[i]
        prev_graph = graph_series[prev_time]['graph']
        curr_graph = graph_series[curr_time]['graph']

        # Strain from the pre-computed mapping
        strain = strain_dict.get(curr_time)
        if strain is None:
            raise ValueError(f"Strain value not found for time step {curr_time}")

        # Bonds broken between the two frames
        broken_dict = collect_broken_bonds(prev_graph, curr_graph)

        # Skip the frame (and do not record its strain) when nothing broke
        if not broken_dict:
            continue

        # Record the strain
        macro_strain.append(strain)

        # All edges of the previous frame that exist in the initial frame
        prev_edges = set(prev_graph.edges(keys=True))
        valid_edges = [e for e in prev_edges if e in GEBC_initial]

        # Binary indicator: 1 if the edge broke in this interval
        broken_indicator = [1 if e in broken_dict else 0 for e in valid_edges]

        # Initial GEBC data for the valid edges
        gebc_vals = [GEBC_initial[e] for e in valid_edges]
        gebc_m1_vals = [GEBC_m1_initial[e] for e in valid_edges]

        # Point-biserial correlation; requires more than one sample
        if len(valid_edges) > 1:
            res1 = pointbiserialr(broken_indicator, gebc_vals)
            res2 = pointbiserialr(broken_indicator, gebc_m1_vals)
            corr1, p1 = res1.correlation, res1.pvalue
            corr2, p2 = res2.correlation, res2.pvalue
        else:
            corr1, p1 = np.nan, np.nan
            corr2, p2 = np.nan, np.nan

        correlations_GEBC.append(corr1)
        correlations_GEBC_m1.append(corr2)
        pvalues_GEBC.append(p1)
        pvalues_GEBC_m1.append(p2)

    # Create the output directory
    save_path = os.path.join(mission_dir, 'analysis_correlations')
    os.makedirs(save_path, exist_ok=True)

    # Plot: correlations on top, p-values below
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 10), sharex=True)
    ax1.plot(macro_strain, correlations_GEBC, label='GEBC')
    ax1.plot(macro_strain, correlations_GEBC_m1, label='GEBC_m1')
    ax1.axhline(0, color='red', linewidth=0.8)
    ax1.set_ylabel('Correlation')
    ax1.legend()
    ax1.grid(True)

    ax2.plot(macro_strain, pvalues_GEBC, '--', label='p GEBC')
    ax2.plot(macro_strain, pvalues_GEBC_m1, '--', label='p GEBC_m1')
    ax2.axhline(0.05, color='red', linewidth=0.8)
    ax2.set_xlabel('Strain')
    ax2.set_ylabel('p-value')
    ax2.legend()
    ax2.grid(True)

    plt.tight_layout()
    fig.savefig(os.path.join(save_path,
                             'Correlation_Pvalue_initial_GEBCs_broken.png'))
    plt.close(fig)

    # Persist the results
    with open(os.path.join(save_path,
                           'Correlation_Pvalue_initial_GEBCs_broken.pkl'), 'wb') as f:
        pickle.dump({
            'macro_strain': macro_strain,
            'corr_GEBC': correlations_GEBC,
            'corr_GEBC_m1': correlations_GEBC_m1,
            'p_GEBC': pvalues_GEBC,
            'p_GEBC_m1': pvalues_GEBC_m1
        }, f)

    return correlations_GEBC, correlations_GEBC_m1, pvalues_GEBC, pvalues_GEBC_m1


def calculate_time_step_correlations(graph_series, mission_dir):
    """
    Correlate each frame's *current* edge GEBC / GEBC_m1 values with the
    per-bond x strain at that frame.

    Unlike ``calculate_initial_correlations``, the GEBC values are read
    from the current graph at every time step.  Correlations and
    p-values are plotted against macroscopic strain and pickled under
    ``mission_dir/analysis_correlations``.

    Args:
        graph_series (dict): time -> frame dict with a 'graph' entry.
        mission_dir (str): output directory root.

    Returns:
        tuple: (correlations_GEBC_time, correlations_GEBC_m1_time,
                pvalues_GEBC_time, pvalues_GEBC_m1_time), one entry per
                later time step (np.nan where fewer than two samples).
    """
    bond_strains_serise, timestep_list = graph_series_to_bond_strains_serise(
        graph_series)

    correlations_GEBC_time = []
    correlations_GEBC_m1_time = []
    pvalues_GEBC_time = []
    pvalues_GEBC_m1_time = []

    macro_strain = [0]
    # Pearson correlation and p-value for every later frame.
    for time in timestep_list[1:]:
        x_strains_dict, _ = bond_strains_serise[time]
        graph_current = graph_series[time]['graph']
        current_edges = set((u, v, k)
                            for u, v, k in graph_current.edges(keys=True))
        # NOTE(review): indexes graph_series with the literal key 0, not
        # timestep_list[0] — confirm time stamps always start at 0.
        macro_strain.append(am.get_strain_x(
            graph_series[0], graph_series[time]))
        common_edges = current_edges
        strain_values_current = [x_strains_dict[edge] for edge in common_edges]

        # Current-frame GEBC vs strain.
        GEBC_values_current = [graph_current.edges[u, v, k].get(
            'GEBC', 0) for u, v, k in common_edges]
        if len(GEBC_values_current) > 1:
            corr_GEBC_current, p_GEBC_current = pearsonr(
                GEBC_values_current, strain_values_current)
            correlations_GEBC_time.append(corr_GEBC_current)
            pvalues_GEBC_time.append(p_GEBC_current)
        else:
            correlations_GEBC_time.append(np.nan)
            pvalues_GEBC_time.append(np.nan)

        # Current-frame GEBC_m1 vs strain.
        GEBC_m1_values_current = [graph_current.edges[u, v, k].get(
            'GEBC_m1', 0) for u, v, k in common_edges]
        if len(GEBC_m1_values_current) > 1:
            corr_GEBC_m1_current, p_GEBC_m1_current = pearsonr(
                GEBC_m1_values_current, strain_values_current)
            correlations_GEBC_m1_time.append(corr_GEBC_m1_current)
            pvalues_GEBC_m1_time.append(p_GEBC_m1_current)
        else:
            correlations_GEBC_m1_time.append(np.nan)
            pvalues_GEBC_m1_time.append(np.nan)

    save_path = os.path.join(mission_dir, 'analysis_correlations')
    os.makedirs(save_path, exist_ok=True)

    # Two stacked subplots: correlations on top, p-values below.
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 10), sharex=True)

    # Top subplot: correlation coefficients.
    ax1.plot(macro_strain[1:], correlations_GEBC_time,
             label='Correlation: GEBC (current)')
    ax1.plot(macro_strain[1:], correlations_GEBC_m1_time,
             label='Correlation: GEBC_m1 (current)')
    ax1.axhline(0, color='red', linewidth=0.8)  # zero reference line
    ax1.set_ylabel('Pearson Correlation Coefficient')
    ax1.set_title('Correlation between GEBCs and Strain Over Time')
    ax1.legend(loc='upper right')
    ax1.grid(True)

    # Bottom subplot: p-values.
    ax2.plot(macro_strain[1:], pvalues_GEBC_time,
             linestyle='--', label='P-value: GEBC (current)')
    ax2.plot(macro_strain[1:], pvalues_GEBC_m1_time,
             linestyle='--', label='P-value: GEBC_m1 (current)')
    ax2.axhline(0.05, color='red', linewidth=0.8,
                label='Statistical Significance = 0.05')  # significance line
    ax2.set_xlabel('Strain')
    ax2.set_ylabel('P-value')
    ax2.legend(loc='upper right')
    ax2.grid(True)

    plt.tight_layout()
    plt.savefig(os.path.join(
        save_path, 'Correlation_and_Pvalue_between_GEBCs_and_Strain_Over_Time.png'))
    # Close the figure so repeated calls do not leak matplotlib figures
    # (the original never closed it).
    plt.close(fig)

    # Persist the raw data next to the plot.
    pkl_path = os.path.join(
        save_path, 'Correlation_and_Pvalue_between_GEBCs_and_Strain_Over_Time.pkl')
    with open(pkl_path, 'wb') as f:
        pickle.dump({
            'timestep_list': timestep_list,
            'correlations_GEBC_time': correlations_GEBC_time,
            'correlations_GEBC_m1_time': correlations_GEBC_m1_time,
            'pvalues_GEBC_time': pvalues_GEBC_time,
            'pvalues_GEBC_m1_time': pvalues_GEBC_m1_time
        }, f)

    return correlations_GEBC_time, correlations_GEBC_m1_time, pvalues_GEBC_time, pvalues_GEBC_m1_time


# 2025-02-23 Basic helper: locate all input.lammps files.
def find_lammps_files(space_path):
    """
    Walk ``space_path`` and collect every directory that contains an
    ``input.lammps`` file.

    Note: the returned entries are the *directories* holding the file,
    not the file paths themselves.

    Args:
        space_path (str): root directory to search.

    Returns:
        list: directory paths containing an ``input.lammps`` file, in
        os.walk order.
    """
    hits = []
    for current_dir, _subdirs, filenames in os.walk(space_path):
        # A directory can hold at most one file of this exact name.
        if "input.lammps" in filenames:
            hits.append(current_dir)
    return hits

# 2025-02-23 Compare the initial and final states of a tensile run to collect all broken bonds.


def collect_broken_bonds(initial_graph, last_graph):
    """
    List the edges of ``initial_graph`` that are absent from ``last_graph``.

    Works on MultiGraphs: edges are identified by the (u, v, key) triple.

    Known limitations (behavior preserved from the original):
      - When both graphs have the same edge count the function returns
        [] immediately, so a simultaneous break and re-formation within
        one interval goes undetected.  (2025-03-13 note: a bond may also
        break on an already-severed chain without changing the
        chain-level structure between graphs.)
      - The scan stops early once ``diff`` missing edges are found.

    Args:
        initial_graph: earlier MultiGraph.
        last_graph: later MultiGraph.

    Returns:
        list[tuple]: (u, v, key) identifiers of broken bonds.
    """
    broken_bond = []

    # Edge-count difference between the two frames.
    diff = initial_graph.number_of_edges() - last_graph.number_of_edges()

    # Equal counts -> assume nothing broke.
    if diff == 0:
        return broken_bond

    # Scan the earlier graph's edges for ones missing from the later graph.
    for u, v, key in initial_graph.edges(keys=True):
        if not last_graph.has_edge(u, v, key):
            broken_bond.append((u, v, key))
            # Stop early once the expected number of breaks is found.
            if len(broken_bond) == diff:
                return broken_bond

    return broken_bond
    # (Removed the original's unreachable trailing `return (L_current -
    # L_initial) / L_initial` — dead code referencing undefined names.)

# 2025-02-23 Plot the cumulative broken-bond curve from a graph_series (likely "over time" was intended).


def __del_plot_broken_bonds(graph_series, save_flag=True, edge_break_check=True):
    """
    Deprecated (superseded by ``plot_broken_bonds``): plot or return the
    cumulative number of broken bonds versus x strain.

    NOTE(review): when frames are skipped (edge_break_check=True), the
    returned ``time_steps[1:]`` is not trimmed to the recorded frames, so
    its length can differ from ``strains``/``broken_nums`` — confirm
    before relying on the save_flag=False return value.

    :param graph_series: dict, time -> frame dict with 'graph' and
        'STRAIN_X' entries.
    :param save_flag: when True, save a plot and a text file; when False,
        return the collected series instead.
    :param edge_break_check: when True, skip frames with no broken bonds.
    :return: only when save_flag is False:
        ((time_steps[1:], strains), broken_nums).
    """
    # Initialize list to store the cumulative number of broken bonds
    broken_nums = [0]
    strains = [0]

    time_steps = list(graph_series.keys())
    time_steps.sort()

    cumulative_broken_num = 0  # Initialize cumulative broken bonds count
    time_steps_edge_changed = []
    # Iterate through the time steps and calculate the cumulative broken bonds
    for time_id in range(len(time_steps) - 1):

        prev_graph = graph_series[time_steps[time_id]]['graph']
        next_graph = graph_series[time_steps[time_id + 1]]['graph']
        # `collect_broken_bonds` returns a list of broken (u, v, key) edges
        broken_num = len(collect_broken_bonds(prev_graph, next_graph))
        if broken_num == 0 and edge_break_check:
            continue
        # Update the cumulative broken number
        cumulative_broken_num += broken_num
        time_steps_edge_changed.append(time_steps[time_id])
        broken_nums.append(cumulative_broken_num)

        strains.append(graph_series[time_steps[time_id+1]]['STRAIN_X'])

    if save_flag:
        # Plot the cumulative broken bonds over time steps
        plt.figure(figsize=(10, 6))
        plt.plot(strains, broken_nums, marker='o', linestyle='-',
                 color='b', label='Cumulative Broken Bonds')
        plt.xlabel('Strain_x')
        plt.ylabel('Cumulative Number of Broken Bonds')
        plt.title('Cumulative Broken Bonds Over Time')
        plt.grid(True)
        plt.legend()

        # Save the plot as an image
        plt.savefig('cumulative_broken_bonds_plot.png')
        plt.close()
        print("Plot saved as 'cumulative_broken_bonds_plot.png'")

        # Open a file in write mode
        with open('output.txt', 'w') as file:
            # Write headers
            file.write('Strain\tBroken_Bonds\n')

            # Write data
            for strain, broken_num in zip(strains, broken_nums):
                file.write(f'{strain}\t{broken_num}\n')

        print("Data saved to output.txt")
    else:
        # Two candidate x-axis series (time steps and strains) — take your pick.
        return (time_steps[1:], strains), broken_nums


def plot_broken_bonds(graph_series, save_flag=True, edge_break_check=True):
    """
    Track cumulative bond breakage against x-strain from the first frame.

    Args:
        graph_series (dict): time -> {'graph': MultiGraph, ...}.
        save_flag (bool): when True, write a plot and a data file;
            when False, return the collected series instead.
        edge_break_check (bool): when True, frames with no broken bonds
            are skipped entirely.

    Returns:
        Only when save_flag is False:
            ((event_times, strains), broken_nums)
            event_times: timestamps at which bonds broke.
            strains: x-strain of each recorded frame (leading 0.0 for
                the initial state).
            broken_nums: running total of broken bonds (leading 0).
    """
    timestamps = sorted(graph_series.keys())
    reference_graph = graph_series[timestamps[0]]['graph']

    broken_nums = [0]
    strains = [0.0]
    event_times = []
    running_total = 0

    # Walk consecutive frame pairs and accumulate break counts.
    for idx in range(1, len(timestamps)):
        earlier = graph_series[timestamps[idx - 1]]['graph']
        later = graph_series[timestamps[idx]]['graph']

        n_new_breaks = len(collect_broken_bonds(earlier, later))
        if edge_break_check and n_new_breaks == 0:
            continue

        running_total += n_new_breaks
        broken_nums.append(running_total)
        event_times.append(timestamps[idx])
        # Strain of this frame relative to the very first frame.
        strains.append(bm.compute_box_strain_rate(reference_graph, later))

    if not save_flag:
        return (event_times, strains), broken_nums

    out_plot = 'cumulative_broken_bonds_plot.png'
    out_data = 'output.txt'

    # Plot cumulative broken bonds versus strain.
    plt.figure(figsize=(10, 6))
    plt.plot(strains, broken_nums, marker='o', linestyle='-')
    plt.xlabel('Strain_x')
    plt.ylabel('Cumulative Number of Broken Bonds')
    plt.title('Cumulative Broken Bonds Over Strain')
    plt.grid(True)
    plt.savefig(out_plot)
    plt.close()
    print(f"Plot saved as '{out_plot}'")

    # Dump the same series as tab-separated text.
    with open(out_data, 'w') as f:
        f.write('Strain_x\tCumulative_Broken_Bonds\n')
        for eps, cnt in zip(strains, broken_nums):
            f.write(f"{eps}\t{cnt}\n")
    print(f"Data saved to '{out_data}'")

# 2025-05-07


def compute_strain_dict(graph_series):
    """
    Map every time stamp to its x-direction strain versus the first frame.

    Args:
        graph_series (dict): time -> {'graph': MultiGraph, ...}.

    Returns:
        dict: time -> ε_x; the earliest frame maps to 0.0.  Empty input
        yields an empty dict.
    """
    ordered_times = sorted(graph_series)
    if not ordered_times:
        return {}

    baseline = graph_series[ordered_times[0]]['graph']
    # The reference frame is unstrained by definition.
    result = {ordered_times[0]: 0.0}

    for stamp in ordered_times[1:]:
        result[stamp] = bm.compute_box_strain_rate(
            baseline, graph_series[stamp]['graph'])

    return result


def universe_strain_time_series(universe):
    """
    Compute the macroscopic x-strain of an MDAnalysis Universe per frame.

    The strain of each frame is (Lx - Lx0) / Lx0, where Lx is the box
    length along x (``ts.dimensions[0]``; the dimensions layout is
    [lx, ly, lz, alpha, beta, gamma]) and Lx0 is taken from frame 0.

    Args:
        universe: MDAnalysis.Universe with topology and trajectory loaded.

    Returns:
        tuple: (times, strains)
            times (list[float]): ``ts.time`` per frame (trajectory units).
            strains (list[float]): x strain per frame relative to frame 0.
    """
    # Reference x length from the first frame.
    reference_length = universe.trajectory[0].dimensions[0]

    times, strains = [], []
    for snapshot in universe.trajectory:
        times.append(snapshot.time)
        strains.append(
            (snapshot.dimensions[0] - reference_length) / reference_length)

    return times, strains
