import networkx as nx
import pickle
import pandas as pd
import os
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image

import GraphTools.basic_method as bm
# 绘图的各种函数


def plot_bond_data(bond_data, save_path):
    """Plot the number of broken bonds versus strain and dump the raw data.

    Parameters:
        bond_data: dict mapping timestep -> dict with at least
            'NUMBER_OF_ENTRIES' and (optionally) 'STRAIN_X'.
        save_path: directory receiving the .svg figure and the .txt data file.
    """
    strains = []
    broken_entries = []

    # The first timestep's NUMBER_OF_ENTRIES is the baseline bond count;
    # every later deficit from it is the broken-bond count.
    initial_number_of_entries = None
    for data in bond_data.values():
        if initial_number_of_entries is None:
            initial_number_of_entries = data['NUMBER_OF_ENTRIES']
        strains.append(data.get('STRAIN_X', 0))
        broken_entries.append(initial_number_of_entries -
                              data['NUMBER_OF_ENTRIES'])

    # Plot broken-bond count against strain.
    fig, ax = plt.subplots()
    ax.set_xlabel('Strain')
    ax.set_ylabel('Broken Bonds', color='tab:red')
    ax.plot(strains, broken_entries, color='tab:red', label='Broken Bonds')
    ax.tick_params(axis='y', labelcolor='tab:red')

    fig.tight_layout()
    plt.title('Broken Bonds vs Strain')

    # BUG FIX: `filename` was previously unused and both output files were
    # saved under a literal placeholder name; also removed a duplicated
    # tight_layout/title pair that overwrote the title.
    filename = 'out_Strain—Broken_Bonds'
    plt.savefig(os.path.join(save_path, f'{filename}.svg'))

    # Save the raw data to a txt file with the same base name.
    data_file_path = os.path.join(save_path, f'{filename}.txt')
    with open(data_file_path, 'w') as f:
        f.write('Strain\tBroken_Bonds\n')
        for strain, broken in zip(strains, broken_entries):
            f.write(f'{strain}\t{broken}\n')


def GEBC_hist_compare(network_graph1, network_graph2, full_dir_path, note1='initial', note2='final', modeflag='GEBC'):
    """Overlay histograms of one edge-betweenness metric for two graphs.

    Both histograms share the same bin range so the two distributions are
    directly comparable; the PNG is written under <full_dir_path>/analysis.
    """
    values_a = [attrs[modeflag]
                for _, _, attrs in network_graph1.edges(data=True)
                if modeflag in attrs]
    values_b = [attrs[modeflag]
                for _, _, attrs in network_graph2.edges(data=True)
                if modeflag in attrs]

    # Shared binning range spanning both distributions.
    lo = min(min(values_a), min(values_b))
    hi = max(max(values_a), max(values_b))
    bins = 30

    plt.figure(figsize=(8, 6))
    plt.hist(values_a, bins=bins, range=(lo, hi),
             color='skyblue', alpha=0.5, label=f'{note1} Betweenness')
    plt.hist(values_b, bins=bins, range=(lo, hi),
             color='salmon', alpha=0.5, label=f'{note2} Betweenness')

    plt.title('Edge Betweenness Centrality Distribution Comparison')
    plt.xlabel('Betweenness Centrality')
    plt.ylabel('Frequency')
    plt.legend()
    plt.grid(True)

    # Save under the analysis subdirectory.
    save_path = os.path.join(full_dir_path, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(
        save_path, f'{modeflag}_{note1}_{note2}_comparison_hist.png'))

    return


def GEBC_hist_single_old(network_graph, full_dir_path, note='current', modeflag='GEBC', modeflag1='GEBC_m1'):
    """Overlay histograms of two edge-betweenness metrics of a single graph.

    The two metrics (modeflag / modeflag1) share one bin range so their
    distributions are directly comparable. The figure is written under
    <full_dir_path>/analysis and its path returned.
    """
    metric_a = [attrs[modeflag]
                for _, _, attrs in network_graph.edges(data=True)
                if modeflag in attrs]
    metric_b = [attrs[modeflag1]
                for _, _, attrs in network_graph.edges(data=True)
                if modeflag1 in attrs]

    # Common binning range covering both metrics.
    lo = min(min(metric_a), min(metric_b))
    hi = max(max(metric_a), max(metric_b))
    bins = 30

    plt.figure(figsize=(8, 6))
    plt.hist(metric_a, bins=bins, range=(lo, hi),
             color='skyblue', alpha=0.5, label=f'{modeflag} Betweenness')
    plt.hist(metric_b, bins=bins, range=(lo, hi),
             color='salmon', alpha=0.5, label=f'{modeflag1} Betweenness')

    plt.title(f'Betweenness Centrality Distribution Comparison for {note}')
    plt.xlabel('Betweenness Centrality')
    plt.ylabel('Frequency')
    plt.legend()
    plt.grid(True)

    # Save under the analysis subdirectory and report the path.
    save_path = os.path.join(full_dir_path, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    comparison_hist_path = os.path.join(
        save_path, f'{modeflag}_{modeflag1}_{note}_comparison_hist.png')
    plt.savefig(comparison_hist_path)

    return comparison_hist_path


def GEBC_hist_single(network_graph, full_dir_path, note='current', modeflag='GEBC', modeflag1='GEBC_m1', bin_range=None, y_limit=None, bins=30):
    """Overlay histograms of two betweenness metrics with optional fixed axes.

    Parameters:
        network_graph: graph whose edges may carry modeflag / modeflag1 values.
        full_dir_path: directory under which 'analysis/' receives the PNG.
        note: tag used in the title and filename.
        modeflag: edge-attribute key of the first metric.
        modeflag1: edge-attribute key of the second metric.
        bin_range: (min, max) x range; derived from the data when None.
        y_limit: when given, the y axis is pinned to (0, y_limit).
        bins: number of histogram bins.

    Returns:
        comparison_hist_path: path of the saved image.
    """
    metric_a = [attrs[modeflag]
                for _, _, attrs in network_graph.edges(data=True)
                if modeflag in attrs]
    metric_b = [attrs[modeflag1]
                for _, _, attrs in network_graph.edges(data=True)
                if modeflag1 in attrs]

    # Derive the bin range when none is supplied (defaults guard empty lists).
    if bin_range is None:
        lo = min(min(metric_a, default=0), min(metric_b, default=0))
        hi = max(max(metric_a, default=1), max(metric_b, default=1))
    else:
        lo, hi = bin_range

    plt.figure(figsize=(8, 6))
    plt.hist(metric_a, bins=bins, range=(lo, hi),
             color='skyblue', alpha=0.5, label=f'{modeflag} Betweenness')
    plt.hist(metric_b, bins=bins, range=(lo, hi),
             color='salmon', alpha=0.5, label=f'{modeflag1} Betweenness')

    plt.title(f'Betweenness Centrality Distribution Comparison for {note}')
    plt.xlabel('Betweenness Centrality')
    plt.ylabel('Frequency')
    plt.legend()
    plt.grid(True)

    # Pin the y axis when a limit is requested (for comparable frames).
    if y_limit is not None:
        plt.ylim(0, y_limit)

    # Save under the analysis subdirectory.
    save_path = os.path.join(full_dir_path, 'analysis')
    os.makedirs(save_path, exist_ok=True)
    comparison_hist_path = os.path.join(
        save_path, f'{modeflag}_{modeflag1}_{note}_comparison_hist.png')
    plt.savefig(comparison_hist_path)
    plt.close()  # release the figure to save memory

    return comparison_hist_path


def create_gif_and_heatmap(bond_strains_series, network_mission_dir, note=''):
    """
    Create a GIF and a heatmap from bond_strains_series data.

    Parameters:
    - bond_strains_series: dict {timestep: (x_strains_dict, total_strain_ratio)}
    - network_mission_dir: str, directory to save the output files
    - note: str, optional note to append to filenames
    """
    import matplotlib.pyplot as plt
    import os
    import imageio
    import numpy as np

    # Output locations: one folder for the GIF/heatmap, one for the frames.
    save_path = os.path.join(network_mission_dir, 'analysis_heatmap_GIFs')
    os.makedirs(save_path, exist_ok=True)
    frame_dir = os.path.join(save_path, f'frame{note}')
    os.makedirs(frame_dir, exist_ok=True)

    # Ensure output directory exists
    if not os.path.exists(network_mission_dir):
        os.makedirs(network_mission_dir)

    # Process timesteps in chronological order.
    timesteps = sorted(bond_strains_series.keys())

    image_files = []           # histogram frame paths (GIF input)
    strain_distributions = []  # per-timestep bin counts (heatmap rows)

    # BUG FIX: x_right_lim used to be assigned inside the first loop and was
    # only visible to the second loop through variable leakage (and would be
    # undefined for an empty series). Hoist it so both passes share it.
    x_right_lim = 15

    # Pass 1: pool all finite, in-range strains so every frame can share
    # the same global bin edges.
    all_filtered_strains = []
    for timestep in timesteps:
        x_strains_dict, total_strain_ratio = bond_strains_series[timestep]
        strains = np.array(list(x_strains_dict.values()))
        # Drop NaN/inf values and outliers beyond |strain| > x_right_lim.
        filtered_strains = strains[np.isfinite(strains)]
        filtered_strains = [
            strain for strain in filtered_strains if abs(strain) <= x_right_lim]
        all_filtered_strains.extend(filtered_strains)

    # Global bin edges (shared by every frame and by the heatmap columns).
    bins = 100
    _, global_bin_edges = np.histogram(all_filtered_strains, bins=bins)

    # Compute bin centers (used for the heatmap x-tick labels).
    bin_centers = (global_bin_edges[:-1] + global_bin_edges[1:]) / 2

    # Pass 2: one histogram figure per timestep + counts for the heatmap.
    for timestep in timesteps:
        x_strains_dict, total_strain_ratio = bond_strains_series[timestep]
        strains = np.array(list(x_strains_dict.values()))
        filtered_strains = strains[np.isfinite(strains)]
        filtered_strains = [
            strain for strain in filtered_strains if abs(strain) <= x_right_lim]

        # Bin counts with the shared global edges (heatmap row).
        counts, _ = np.histogram(filtered_strains, bins=global_bin_edges)

        # Plot this timestep's histogram.
        plt.figure(figsize=(8, 6))
        plt.hist(filtered_strains, bins=global_bin_edges,
                 alpha=0.7, label="Edge Strains")
        plt.axvline(total_strain_ratio, color='r', linestyle='--',
                    label=f"Total Strain Ratio: {round(total_strain_ratio, 3)}")
        plt.ylim(top=300)
        plt.ylabel("Frequency")
        plt.legend()
        plt.title(f"Edge Strain Distribution at Timestep {timestep}")

        # Use scientific notation on the x-axis and fold the scale factor
        # into the axis label instead of the default offset text.
        ax = plt.gca()
        ax.ticklabel_format(axis='x', style='sci', scilimits=(-2, 2))
        scale_text = ax.xaxis.get_offset_text().get_text()
        if scale_text:
            ax.set_xlabel(f"Strain ({scale_text.strip()})")
            ax.xaxis.get_offset_text().set_visible(False)
        else:
            ax.set_xlabel("Strain")

        # Save the frame.
        image_file = os.path.join(
            frame_dir, f"strain_hist_{note}_{timestep}.png")
        plt.savefig(image_file)
        plt.close()
        image_files.append(image_file)

        # Collect counts for the heatmap.
        strain_distributions.append(counts)

    # Assemble the GIF from the saved frames.
    images = [imageio.imread(filename) for filename in image_files]
    gif_path = os.path.join(save_path, f"strain_hist_{note}.gif")
    imageio.mimsave(gif_path, images, duration=1.5,
                    loop=10)  # Adjust duration as needed

    # Heatmap: rows are timesteps, columns are strain bins.
    strain_distributions = np.array(strain_distributions)

    plt.figure(figsize=(10, 8))
    plt.imshow(strain_distributions, aspect='auto', origin='lower', cmap='hot')
    plt.colorbar(label='Frequency')
    plt.xlabel('Strain')
    plt.ylabel('Timestep')
    plt.title('Strain Distribution Over Time')

    # Label a subset of x ticks with the corresponding bin-center values.
    num_bins = len(bin_centers)
    num_ticks = 10  # Number of ticks to display
    tick_positions = np.linspace(0, num_bins - 1, num_ticks)
    if bin_centers[0] != bin_centers[-1]:  # non-constant bin centers
        tick_labels = np.round(np.linspace(
            bin_centers[0], bin_centers[-1], num_ticks), 2)
    else:  # degenerate case: all bin centers share one value
        tick_labels = np.full(num_ticks, bin_centers[0])
    plt.xticks(tick_positions, tick_labels)

    plt.yticks(range(len(timesteps)), timesteps)
    heatmap_path = os.path.join(save_path, f"strain_hist_{note}.png")
    plt.savefig(heatmap_path)
    plt.close()

    # # Symmetry analysis (disabled):
    # threshold_and_calculate_center(strain_distributions,
    #                                 threshold=5,
    #                                 bin_centers=bin_centers,
    #                                 save_path=save_path)


def calculate_total_strain_ratio(graph_initial, graph_finale):
    """Compute the engineering strain along x between two snapshots.

    First tries to read the x bounds from graph['BOX_BOUNDS'][0] (a
    "min max" string); on failure falls back to
    bm.get_box_dimensions(graph['graph'].graph['box'])[0].

    Returns:
        float: (L_final - L_initial) / L_initial in the x direction.
    """
    try:
        # Format 1: 'BOX_BOUNDS'[0] is a "min max" string.
        x_min_initial, x_max_initial = map(
            float, graph_initial['BOX_BOUNDS'][0].split())
        x_min_final, x_max_final = map(
            float, graph_finale['BOX_BOUNDS'][0].split())

        L_initial = x_max_initial - x_min_initial
        L_final = x_max_final - x_min_final
    except (KeyError, ValueError, TypeError):
        # Format 2: box dimensions stored on the embedded graph object.
        # (The original lower bound here is implicitly 0, so the length is
        # simply the first box dimension; the unused `method` tag was removed.)
        L_initial = bm.get_box_dimensions(
            graph_initial['graph'].graph['box'])[0]
        L_final = bm.get_box_dimensions(
            graph_finale['graph'].graph['box'])[0]

    return (L_final - L_initial) / L_initial


def get_box_length_x(graph):
    """Return the box length along x stored in *graph*.

    Two storage formats are supported:
    1. graph['BOX_BOUNDS'][0] is a "min max" string.
    2. Fallback: bm.get_box_dimensions(graph['graph'].graph['box'])[0]
       (get_box_dimensions is assumed to return [Lx, Ly, Lz]).

    Raises:
        ValueError: when neither format yields a length.
    """
    try:
        # Format 1: parse the "min max" bounds string.
        lo, hi = (float(v) for v in graph['BOX_BOUNDS'][0].split())
        return hi - lo
    except (KeyError, IndexError, ValueError, TypeError):
        pass

    # Format 2: fall back to the box dimensions on the embedded graph.
    try:
        return bm.get_box_dimensions(graph['graph'].graph['box'])[0]
    except Exception as e:
        raise ValueError(f"无法从 graph 提取 x 方向盒子长度(两种格式均失败):{e}")


def plot_x_strain_hist(graph_initial, graph_finale, note='', network_mission_dir="."):
    """
    Compute the per-edge deformation ratio (strain) projected on x between
    the initial and final network states; a histogram plot exists but is
    currently disabled (see the `if False:` block below).

    Note (2025-02-20): graph_initial is a dict; the network itself is stored
    under graph_initial['graph'].

    Parameters:
    - graph_initial: initial network, a dict with 'BOX_BOUNDS' and 'graph' keys
    - graph_finale: final network, same structure
    - note: filename tag
    - network_mission_dir: directory for saving figures

    Returns:
    - x_strains_dict: {(u, v, k): strain_x} for every edge of the initial graph
    - total_strain_ratio: overall x strain between the two states
    """
    import os
    import numpy as np
    import matplotlib.pyplot as plt

    # Overall x strain between the initial and final boxes.
    total_strain_ratio = calculate_total_strain_ratio(
        graph_initial, graph_finale)
    # Box lengths along x (BOX_BOUNDS entries are "min max" strings).
    box_length_initial_x = get_box_length_x(graph_initial)
    box_length_final_x = get_box_length_x(graph_finale)

    x_strains = []
    x_strains_dict = {}
    all_point = []

    # For every edge of the initial network, compute its x deformation ratio.
    # NOTE(review): edges(keys=True) implies a MultiGraph — confirm upstream.
    for u, v, k in graph_initial['graph'].edges(keys=True):
        # Node coordinates in the initial and final states.
        initial_pos_u = np.array(graph_initial['graph'].nodes[u]['posi'])
        initial_pos_v = np.array(graph_initial['graph'].nodes[v]['posi'])
        final_pos_u = np.array(graph_finale['graph'].nodes[u]['posi'])
        final_pos_v = np.array(graph_finale['graph'].nodes[v]['posi'])

        # Debug aid: record the initial x coordinate of node u.
        all_point.append(initial_pos_u[0])

        # x component of the edge vector in both states.
        delta_initial_x = initial_pos_v[0] - initial_pos_u[0]
        delta_final_x = final_pos_v[0] - final_pos_u[0]

        # Minimum-image correction for periodic boundaries (box length in x).
        delta_initial_x -= box_length_initial_x * \
            np.round(delta_initial_x / box_length_initial_x)
        delta_final_x -= box_length_final_x * \
            np.round(delta_final_x / box_length_final_x)

        # Deformation ratio along x.
        if delta_initial_x != 0:
            strain_x = (delta_final_x - delta_initial_x) / delta_initial_x
        else:
            strain_x = 0  # avoid division by zero
        x_strains.append(strain_x)
        x_strains_dict[(u, v, k)] = strain_x

    if False:  # single-frame plotting is handled by the GIF routine; disabled here
        x_right_lim = 3
        filtered_strains = [
            strain for strain in x_strains if abs(strain) <= x_right_lim]

        plt.figure(figsize=(8, 6))
        bins = 100
        plt.hist(filtered_strains, bins=bins, alpha=0.7, label="Edge Strains")
        plt.axvline(total_strain_ratio, color='r', linestyle='--',
                    label=f"Total Strain Ratio: {total_strain_ratio:.2f}")
        plt.xlabel("Strain")
        plt.ylabel("Frequency")
        plt.legend()
        plt.title("Edge Strain Distribution")

        save_path = os.path.join(network_mission_dir, 'analysis')
        os.makedirs(save_path, exist_ok=True)
        plt.savefig(os.path.join(save_path, f'x_strain_Dist_{note}.png'))
        plt.close()

    return x_strains_dict, total_strain_ratio


# 应变热力图 的对称性分析函数

def threshold_and_calculate_center(strain_distributions,
                                   threshold=3,
                                   bin_centers=None,
                                   timesteps=None,
                                   note='',
                                   save_path='',
                                   binary_image_name='binary_image.png'):
    """Binarize a strain heatmap and locate the mean x positions of its
    negative- and positive-side activated regions, per row and globally.

    Parameters:
        strain_distributions: 2-D array (rows = timesteps, cols = strain bins).
        threshold: counts strictly above this value are marked 1.
        bin_centers: x coordinate of each column; column indices are used
            when missing or of mismatched length.
        timesteps: optional y-tick labels (one per row).
        note: currently unused; kept for interface compatibility.
        save_path: directory that receives the output figures.
        binary_image_name: filename of the binary image; when left at its
            default, a threshold-tagged name is generated instead.

    Returns:
        (row_neg_avgs, row_pos_avgs, global_neg_avg, global_pos_avg)
    """
    # ========== 1) Binarize ==========
    binary_dist = (strain_distributions > threshold).astype(int)

    # ========== 2) Fall back to column indices when bin_centers is unusable ==========
    num_rows, num_cols = strain_distributions.shape
    if bin_centers is None or len(bin_centers) != num_cols:
        bin_centers = np.arange(num_cols)

    # ========== 3) Accumulators ==========
    row_neg_avgs = []
    row_pos_avgs = []
    total_sum_x_neg = 0.0
    total_sum_x_pos = 0.0
    total_count_neg = 0.0
    total_count_pos = 0.0

    # ========== 3.1) Draw and save the binary image ==========
    plt.figure(figsize=(10, 6))
    plt.imshow(binary_dist,
               aspect='auto',    # non-equal aspect; adjust if needed
               origin='lower',   # (0, 0) at the bottom-left corner
               cmap='gray')
    plt.colorbar(label='Binary Value')
    plt.title(f'Binary Image (threshold={threshold})')

    # x ticks: after step 2 bin_centers always matches num_cols, so the
    # previously dead "column index" branch was removed. Show at most 10
    # evenly spaced bin-center labels.
    num_bins = len(bin_centers)
    num_ticks = min(10, num_bins)
    tick_positions = np.linspace(0, num_bins - 1, num_ticks)
    if bin_centers[0] != bin_centers[-1]:
        tick_labels = np.round(np.linspace(
            bin_centers[0], bin_centers[-1], num_ticks), 2)
    else:
        # Degenerate case: all bin centers share one value.
        tick_labels = np.full(num_ticks, bin_centers[0])
    plt.xticks(tick_positions, tick_labels)
    plt.xlabel('X (bin_centers)')

    # y ticks: label rows with timesteps when available.
    if timesteps is not None and len(timesteps) == num_rows:
        plt.yticks(range(num_rows), timesteps)
        plt.ylabel('Timesteps')
    else:
        plt.ylabel('Row Index (Y-axis)')

    # BUG FIX: binary_image_name was previously overwritten unconditionally,
    # so the parameter had no effect; honor an explicitly supplied name.
    if binary_image_name == 'binary_image.png':
        binary_image_name = f'binary_image-threshold={threshold}.png'
    binary_path = os.path.join(save_path, binary_image_name)
    plt.savefig(binary_path)
    plt.close()

    # ========== 4) Per-row negative/positive mean positions ==========
    for row in range(num_rows):
        sum_x_neg = 0.0
        sum_x_pos = 0.0
        count_neg = 0.0
        count_pos = 0.0

        for col in range(num_cols):
            if binary_dist[row, col] == 1:
                if bin_centers[col] < 0:
                    sum_x_neg += bin_centers[col]
                    count_neg += 1
                else:
                    sum_x_pos += bin_centers[col]
                    count_pos += 1

        # NaN marks rows with no activated bins on that side.
        row_neg_avgs.append(sum_x_neg / count_neg if count_neg > 0 else np.nan)
        row_pos_avgs.append(sum_x_pos / count_pos if count_pos > 0 else np.nan)

        # Accumulate into the global totals.
        total_sum_x_neg += sum_x_neg
        total_sum_x_pos += sum_x_pos
        total_count_neg += count_neg
        total_count_pos += count_pos

    # ========== 5) Global negative/positive mean positions ==========
    global_neg_avg = total_sum_x_neg / \
        total_count_neg if total_count_neg > 0 else np.nan
    global_pos_avg = total_sum_x_pos / \
        total_count_pos if total_count_pos > 0 else np.nan

    # Per-row midpoint between the negative and positive means.
    # BUG FIX: this plot used to draw on an implicit figure, save into the
    # current working directory, and never close; give it its own figure,
    # save alongside the other outputs, and close it.
    row_avgs = (np.array(row_neg_avgs) + np.array(row_pos_avgs)) / 2
    plt.figure()
    plt.plot(row_avgs)
    plt.savefig(os.path.join(save_path, f'各行数据-threshold={threshold}.png'))
    plt.close()

    # ========== 6) Report the global midpoint ==========
    neg_pos_ave = (global_neg_avg + global_pos_avg) / 2
    print(f"  全局 正负部分的平均位置 = {neg_pos_ave}")

    return row_neg_avgs, row_pos_avgs, global_neg_avg, global_pos_avg


# 2025-02-14
# 绘制若干个任务的阴影和其平均数值
def plot_with_shadow(x_data, data_df, mode='std', note='', x_label='x', y_label='y',
                     figsave_flag=True):
    """Plot the column-wise mean of *data_df* over *x_data* with a shaded band.

    The band shows mean ± 1 standard deviation when mode='std', or the
    min/max envelope when mode='range'. The figure is optionally saved as a
    PNG, and the mean curve is always pickled next to it.

    Parameters:
    - x_data: 1-D numpy array of x values (one per row of data_df)
    - data_df: DataFrame; each column holds one run's y values, rows match x_data
    - mode: 'std' or 'range'
    - note: filename tag for the saved outputs
    - x_label / y_label: axis labels
    - figsave_flag: when True, save the PNG

    Returns the created matplotlib Figure.
    """
    # Coerce to float and zero out NaN/±inf so the statistics stay finite.
    values = np.nan_to_num(data_df.astype(
        float), nan=0.0, posinf=0.0, neginf=0.0)

    # Row-wise mean across all runs (one value per x).
    mean_values = values.mean(axis=1)

    if mode == 'std':
        spread = values.std(axis=1)
        shadow_lower, shadow_upper = mean_values - spread, mean_values + spread
        shadow_label = '±1 Std Dev'
    elif mode == 'range':
        shadow_lower, shadow_upper = values.min(axis=1), values.max(axis=1)
        shadow_label = 'Max-Min Range'
    else:
        raise ValueError("Invalid mode. Use 'std' or 'range'.")

    # Mean curve plus shaded band.
    fig = plt.figure(figsize=(6, 4), dpi=100)
    plt.plot(x_data, mean_values, color='darkred',
             linewidth=2, label='Mean curve')
    plt.fill_between(x_data, shadow_lower, shadow_upper,
                     color='lightcoral', alpha=0.3, label=shadow_label)

    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.legend()
    plt.title(f'Plot with Shadow Region ({mode.upper()})')
    plt.tight_layout()

    save_name = f'{note}_mode_{mode}_shadow'
    if figsave_flag:
        plt.savefig(f'./{save_name}.png')

    # Always persist the mean curve for later re-plotting.
    with open(f'./{save_name}.pkl', 'wb') as f:
        pickle.dump({'x_data': x_data, 'mean_values': mean_values}, f)

    return fig  # the active Figure object

# 2025-02-25  GEBCs-strain 的 correlation 和 SS曲线


def plot_gebc_and_ss(v_strain_data, v_strees_data,
                     x_strain_label, correlations_GEBC_time, correlations_GEBC_m1_time,
                     save_path='./'):
    """Two stacked panels over one strain range.

    Top: GEBC-strain correlation series plotted against x_strain_label.
    Bottom: the stress curve (v_strain_data, v_strees_data), truncated to
    strains no larger than max(x_strain_label) so both panels align.

    Parameters:
      - v_strain_data: x values of the stress curve (bottom panel).
      - v_strees_data: stress values (bottom panel).
      - x_strain_label: x values of the correlation curves (top panel).
      - correlations_GEBC_time: first GEBC correlation series.
      - correlations_GEBC_m1_time: second GEBC correlation series.
    """
    import matplotlib.pyplot as plt
    import numpy as np
    import os

    fig, (ax_corr, ax_ss) = plt.subplots(2, 1, figsize=(10, 12))

    # Top panel: the two correlation curves (matplotlib default blue/orange).
    ax_corr.plot(x_strain_label, correlations_GEBC_time,
                 label='GEBC Time Correlation',
                 color=(0.1216, 0.4667, 0.7059), marker='.')
    ax_corr.plot(x_strain_label, correlations_GEBC_m1_time,
                 label='GEBC m1 Time Correlation',
                 color=(1.0, 0.4980, 0.0549), marker='.')
    ax_corr.axhline(0, color='red', linestyle='-.', linewidth=1)
    ax_corr.set_xlabel('Strain')
    ax_corr.set_ylabel('GEBC Correlations', color='black')
    ax_corr.tick_params(axis='y', labelcolor='black')
    ax_corr.legend(loc='best')
    ax_corr.set_title('GEBC Correlations')

    # Truncate the stress data at the correlation data's largest strain.
    strain_arr = np.array(v_strain_data)
    stress_arr = np.array(v_strees_data)
    keep = strain_arr <= np.max(x_strain_label)
    strain_cut = strain_arr[keep]
    stress_cut = stress_arr[keep]

    # Bottom panel: the stress-strain curve.
    ax_ss.plot(strain_cut, stress_cut, label='Stress Data', color='red')
    ax_ss.set_xlabel('Strain')
    ax_ss.set_ylabel('Stress', color='red')
    ax_ss.tick_params(axis='y', labelcolor='red')
    ax_ss.legend(loc='best')
    ax_ss.set_title('Strain-Stress Data')

    # Shared x range; fall back to the full data when truncation left nothing.
    if strain_cut.size > 0:
        x_lo, x_hi = np.min(strain_cut), np.max(strain_cut)
    else:
        x_lo, x_hi = np.min(strain_arr), np.max(strain_arr)
    ax_corr.set_xlim(x_lo, x_hi)
    ax_ss.set_xlim(x_lo, x_hi)

    plt.tight_layout()

    plt.savefig(os.path.join(
        save_path, 'Correlation_between_GEBCs_and_Strain_with_ss_Over_Time.png'))


def plot_gebc_and_bb(x_strain_label, broken_nums,
                     correlations_GEBC_time, correlations_GEBC_m1_time,
                     save_path='./', pic_name=''
                     ):
    """
    Plot both GEBC correlation series on the left y-axis and the broken-bond
    counts on a twin right y-axis, all against x_strain_label.

    Parameters:
    - x_strain_label: array-like, common x-axis data.
    - broken_nums: array-like, series for the right y-axis.
    - correlations_GEBC_time: array-like, first correlation series (left y-axis).
    - correlations_GEBC_m1_time: array-like, second correlation series (left y-axis).
    - save_path: NOTE(review) — passed straight to plt.savefig, so despite the
      name (and the './' default) it is treated as the target FILE path;
      callers must supply a full filename.
    - pic_name: figure title.
    """

    import matplotlib.pyplot as plt

    fig, left_ax = plt.subplots(figsize=(10, 6))

    # Correlation curves in matplotlib's default blue and orange.
    left_ax.plot(x_strain_label, correlations_GEBC_time,
                 label='Correlations GEBC Time',
                 # default blue: (31, 119, 180)
                 color=(0.1216, 0.4667, 0.7059), marker='.')
    left_ax.plot(x_strain_label, correlations_GEBC_m1_time,
                 label='Correlations GEBC m1 Time',
                 # default orange: (255, 127, 14)
                 color=(1.0, 0.4980, 0.0549), marker='.')

    left_ax.set_xlabel('x_strain_label')
    left_ax.set_ylabel('Correlations', color='black')
    left_ax.tick_params(axis='y', labelcolor='black')

    # Twin axis on the right for the broken-bond counts.
    right_ax = left_ax.twinx()
    right_ax.plot(x_strain_label, broken_nums,
                  label='Broken Nums', color='red')
    right_ax.set_ylabel('Broken Nums', color='red')
    right_ax.tick_params(axis='y', labelcolor='red')

    # Merge the legends of both axes into a single box.
    handles_l, labels_l = left_ax.get_legend_handles_labels()
    handles_r, labels_r = right_ax.get_legend_handles_labels()
    left_ax.legend(handles_l + handles_r, labels_l + labels_r, loc='best')

    plt.title(pic_name)
    plt.tight_layout()

    plt.savefig(save_path)

# 2025-02-26

# ------------------------------
# 处理不等长的采样数据:对齐到公共 x 轴
# 假设:
# - strains 是一个 list,每个元素都是一组数据对应的 x 轴(采样位置),长度不一定相同.
# - correlations_GEBC_times 是一个 list,每个元素都是该组数据对应的 y 值,且与 strains 中对应位置的数据一一匹配.


def align_data(x_list, y_list, num_points=100):
    """Resample several (x, y) series onto one shared x-axis.

    Parameters:
    - x_list: list of 1-D arrays/lists, x coordinates of each series
    - y_list: list of 1-D arrays/lists, matching y values of each series
    - num_points: number of points on the common x-axis (default 100)

    Returns:
    - common_x: the shared x-axis, a numpy array spanning the global
      min/max of all x data
    - data_df: DataFrame whose columns hold each series linearly
      interpolated onto common_x (rows align with common_x)
    """
    # Common axis covering the union of all series' x ranges.
    lo = min(min(xs) for xs in x_list)
    hi = max(max(xs) for xs in x_list)
    common_x = np.linspace(lo, hi, num_points)

    # Linearly interpolate each series onto the common axis.
    columns = [np.interp(common_x, xs, ys) for xs, ys in zip(x_list, y_list)]

    # Transpose so rows follow common_x and each column is one series.
    data_df = pd.DataFrame(np.array(columns).T)
    return common_x, data_df

# 本地的库
# 2025-03-03


def generation_tree(G, r_max=6):
    """
    Per-edge "generation tree" defect statistics for graph G.
    (Debugged against perfect networks, 2025-02-19.)

    Preprocessing: self-loops and duplicate (parallel) edges are dropped,
    producing an undirected simple graph G_temp; the removed edges are
    recorded.

    For every edge (node_a, node_b) of G_temp a generation-by-generation
    expansion is performed (up to r_max generations). In each generation:
        - Stage 1: from the previous generation's nodes, collect every
          incident edge that does not lead back to the node's recorded
          parent, and gather the corresponding neighbours as candidate
          nodes (duplicates allowed).
        - defection_prop = (3 * node_num - edge_num) / (3 * node_num),
          where node_num is the previous generation's node count and
          edge_num is the number of edges found in this generation.

    Returns:
        edge_defection_props: dict keyed by (min(u, v), max(u, v)) — plus
            the original keys of removed duplicate edges, which inherit the
            surviving edge's values — mapping to the list of per-generation
            defection_prop values for that edge.
    """
    # Preprocessing: build G_temp without self-loops or duplicate edges.
    G_temp = nx.Graph()
    remove_edge = []          # duplicate (parallel) edges that were dropped
    self_loops_removed = []   # self-loops that were dropped

    for u, v in G.edges():
        if u == v:
            self_loops_removed.append((u, v))
            continue
        # Canonical edge form: (min(u, v), max(u, v)).
        edge_canon = (min(u, v), max(u, v))
        if G_temp.has_edge(*edge_canon):
            remove_edge.append((u, v))
        else:
            G_temp.add_edge(*edge_canon)

    # Generation-tree analysis for every edge of G_temp.
    edge_defection_props = {}
    for node_a, node_b in G_temp.edges():
        defection_props = []              # per-generation defection_prop values
        current_nodes = [node_a, node_b]   # generation 1: the seed edge's endpoints
        visited_all = set(current_nodes)   # every node visited so far

        # Parent map: key = node, value = the node it was discovered from.
        father_nodes = {node_a: node_b, node_b: node_a}

        for r in range(1, r_max + 1):
            new_edges = []      # edges found this generation (canonical form)
            candidate_nodes = []  # nodes found this generation (duplicates allowed)

            # Stage 1: expand from the previous generation's nodes.
            for node in current_nodes:
                for nbr in G_temp.neighbors(node):
                    candidate_edge = (min(node, nbr), max(node, nbr))
                    if nbr != father_nodes[node]:
                        new_edges.append(candidate_edge)
                        if nbr not in list(father_nodes.keys()):
                            father_nodes[nbr] = node
                        candidate_nodes.append(nbr)

            # Defection proportion for this generation.
            node_num = len(current_nodes)
            edge_num = len(new_edges)
            dp = (3 * node_num - edge_num) / \
                (3 * node_num) if node_num > 0 else 0
            defection_props.append(dp)

            # Drop candidates that were already visited; deduplicate the rest.
            visited_all.update(current_nodes)
            candidate_nodes = [
                node for node in candidate_nodes if node not in visited_all]
            current_nodes = list(set(candidate_nodes))

            if not new_edges:  # stop early when the frontier is exhausted
                break

        # Store this seed edge's per-generation defection_prop values.
        edge_key = (min(node_a, node_b), max(node_a, node_b))
        edge_defection_props[edge_key] = defection_props

    # Copy the surviving canonical edge's defection_props onto each removed
    # duplicate edge so those edges also appear in the returned dict.
    for u, v in remove_edge:
        canonical_edge = (min(u, v), max(u, v))
        if canonical_edge in edge_defection_props:
            # Reuse the kept edge's values for the duplicate.
            edge_defection_props[(u, v)] = edge_defection_props[canonical_edge]
        else:
            # No matching edge found: default to an empty list.
            edge_defection_props[(u, v)] = []
    # Only the per-edge dict is returned; the bookkeeping lists are internal.
    return edge_defection_props


def get_strain_x(graph_initial, graph_current):
    """Fetch the STRAIN_X value for the current snapshot.

    Lookup order:
      1. graph_current['STRAIN_X'] (dict-style storage)
      2. graph_current.graph['STRAIN_X'] (networkx-style attribute dict)
      3. computed via calculate_total_strain_ratio(graph_initial, graph_current)

    Parameters:
      graph_initial  — graph object at the initial time (graph_series[0])
      graph_current  — graph object at the current time (graph_series[time])
    Returns:
      float STRAIN_X value
    """
    # 1. Dict-style storage takes precedence.
    if 'STRAIN_X' in graph_current:
        return graph_current['STRAIN_X']

    # 2. Fall back to the .graph attribute dict, guarding against objects
    #    without one (or whose .graph is not a dict).
    attrs = getattr(graph_current, 'graph', None)
    if isinstance(attrs, dict):
        try:
            return attrs['STRAIN_X']
        except KeyError:
            pass

    # 3. Last resort: compute it from the two snapshots.
    return calculate_total_strain_ratio(graph_initial, graph_current)


def top_k_subgraph_stats(G, k=5):
    """Print and return size information for the k largest connected components of G.

    Parameters:
        G: networkx graph object
        k: number of components to report (default 5)

    Returns:
        sizes: list of at most k tuples (node_count, edge_count),
        ordered from largest component to smallest.
    """
    # All connected components, largest (by node count) first.
    components = sorted(nx.connected_components(G), key=len, reverse=True)
    total = len(components)

    # Warn when there are fewer components than requested.
    if total == 0:
        print("图中没有任何连通子图.")
        return []
    if total < k:
        print(f"仅发现 {total} 个连通子图,少于请求的 {k} 个.将打印所有子图.")

    # Report node/edge counts for up to k components.
    sizes = []
    for idx, comp in enumerate(components[:k], start=1):
        sub = G.subgraph(comp)
        node_count = sub.number_of_nodes()
        edge_count = sub.number_of_edges()
        print(f"子图 {idx}: {node_count} 个节点, {edge_count} 条边")
        sizes.append((node_count, edge_count))

    return sizes
