from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from flax import linen as nn

from jax import tree_util

from cca_zoo.linear import CCA, rCCA
from cca_zoo.model_selection import GridSearchCV

import seaborn as sns
import logging
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd

import numpy as np
from scipy.linalg import qr, svd, inv
import logging

from scipy.spatial.distance import pdist, squareform
from scipy.stats import pearsonr

from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.palettes import Category20

def vector_projection(A, X):
    """Project vector ``A`` onto the subspace spanned by the rows of ``X``.

    The rows of ``X`` are first orthonormalized via a QR decomposition, so
    the returned coefficients are projections onto an *orthonormal* basis of
    the row space of ``X`` (not onto the raw rows themselves).

    Parameters:
    A: numpy.ndarray of shape (d,), the vector to project.
    X: numpy.ndarray of shape (k, d), k linearly independent basis rows
       (previously hard-coded to k = 4; now any k <= d works).

    Returns:
    numpy.ndarray of shape (k,): the projection coefficients of ``A`` on the
    orthonormalized basis.
    """
    # Number of basis vectors; generalizes the original hard-coded 4.
    k = X.shape[0]

    # Orthonormalize the basis: columns of Q span the row space of X.
    Q, R = np.linalg.qr(X.T)
    U = Q[:, :k].T  # shape (k, d), orthonormal rows

    # Coefficients of A in the orthonormal basis.
    current_projections = U @ A  # shape (k,)

    return current_projections

def adjust_vector_projection_orthonormal(A, X, desired_projections):
    """Minimally adjust ``A`` so that its projections onto the orthonormalized
    basis of ``X`` equal ``desired_projections``.

    The rows of ``X`` are orthonormalized with QR; the correction added to
    ``A`` lies entirely inside the span of ``X``, so the component of ``A``
    orthogonal to that span is untouched.

    Parameters:
    A: numpy.ndarray of shape (d,), the vector to adjust.
    X: numpy.ndarray of shape (k, d), k linearly independent basis rows
       (previously hard-coded to k = 4; now any k <= d works).
    desired_projections: numpy.ndarray of shape (k,), target projection values.

    Returns:
    A_prime: numpy.ndarray of shape (d,), the adjusted vector.
    """
    # Number of basis vectors; generalizes the original hard-coded 4.
    k = X.shape[0]

    # Orthonormalize the basis: columns of Q span the row space of X.
    Q, R = np.linalg.qr(X.T)
    U = Q[:, :k].T  # shape (k, d), orthonormal rows

    # Current coefficients of A in the orthonormal basis.
    current_projections = U @ A  # shape (k,)

    # How much each coefficient must change.
    delta = desired_projections - current_projections  # shape (k,)

    # Add the in-span correction; U.T @ delta maps coefficients back to R^d.
    A_prime = A + U.T @ delta

    return A_prime


def transform_cca(X, A):
    """Map data ``X`` into canonical space using coefficient matrix ``A``.

    Simply the matrix product ``X @ A``: rows of ``X`` are observations,
    columns of the result are canonical variates.
    """
    transformed = X @ A
    return transformed

def inverse_cca(A, U, data_prop):
    """Invert a CCA transform: recover the original-scale data from scores.

    Parameters:
    A: square canonical-coefficient matrix (samples were transformed as
       ``U = X_standardized @ A``).
    U: canonical scores, shape (samples, components).
    data_prop: sequence ``[x_mean, y_mean, x_std, y_std]`` as returned by
       ``canoncorr``; only ``x_mean`` (index 0) and ``x_std`` (index 2) are
       used to undo the standardization.

    Returns:
    numpy.ndarray with the reconstructed data in the original scale.
    """
    rows, cols = A.shape
    # The pseudo-inverse round trip assumes a square coefficient matrix.
    assert rows == cols

    # Undo the linear map, then undo the standardization (scale, then shift).
    recovered = U @ np.linalg.pinv(A)
    recovered = recovered * data_prop[2] + data_prop[0]
    return recovered


def canoncorr(X0: np.array, Y0: np.array, fullReturn: bool = False) -> np.array:
    """
    Canonical Correlation Analysis (CCA) with added diagnostics and preprocessing.

    Standardizes both views, factors them with pivoted economy QR (handling
    rank deficiency), and computes the canonical structure from the SVD of
    Q1.T @ Q2.

    Parameters:
    X0, Y0: (samples/observations) x (features) matrices
    fullReturn: whether all outputs should be returned or just `r`

    Returns (fullReturn=True):
    A_full, B_full: canonical coefficients for X and Y (original column order)
    r: canonical correlations, clipped to [0, 1]
    U, V: canonical scores for X and Y
    [x_mean, y_mean, x_std, y_std]: standardization parameters, needed to
    invert the transform (see `inverse_cca`)

    Returns (fullReturn=False): just r.

    Raises:
    ValueError: if either view has zero numerical rank.
    """
    n, p1 = X0.shape
    p2 = Y0.shape[1]
    
    # # Data diagnostics
    # print(f"X shape: {X0.shape}, Y shape: {Y0.shape}")
    # print(f"X condition number: {np.linalg.cond(X0)}")
    # print(f"Y condition number: {np.linalg.cond(Y0)}")
    
    if p1 >= n or p2 >= n:
        logging.warning('Not enough samples, might cause problems')

    # Preprocessing: Standardize the variables.
    # NOTE(review): a zero-variance feature makes x_std/y_std contain 0 and
    # this division produce inf/nan — confirm inputs have nonzero variance.
    x_mean, y_mean = np.mean(X0, 0), np.mean(Y0, 0)
    x_std, y_std = np.std(X0, 0), np.std(Y0, 0)
    X = (X0 - x_mean) / x_std
    Y = (Y0 - y_mean) / y_std

    # print(np.std(X0, 0), np.std(Y0, 0))

    # Factor the inputs, and find a full rank set of columns if necessary.
    # perm1/perm2 record the column permutations chosen by the pivoted QR.
    Q1, T11, perm1 = qr(X, mode='economic', pivoting=True)
    Q2, T22, perm2 = qr(Y, mode='economic', pivoting=True)

    # Determine ranks: count diagonal entries of R that are non-negligible
    # relative to the largest one.
    tol = np.finfo(float).eps * 100  # Increased tolerance
    rankX = np.sum(np.abs(np.diagonal(T11)) > tol * np.abs(T11[0, 0]))
    rankY = np.sum(np.abs(np.diagonal(T22)) > tol * np.abs(T22[0, 0]))

    # print(f"Rank of X: {rankX}, Rank of Y: {rankY}")

    if rankX == 0:
        raise ValueError('X has zero rank')
    elif rankX < p1:
        # Drop the linearly dependent columns identified by the pivoted QR.
        logging.warning('X is not full rank')
        Q1 = Q1[:, :rankX]
        T11 = T11[:rankX, :rankX]

    if rankY == 0:
        raise ValueError('Y has zero rank')
    elif rankY < p2:
        logging.warning('Y is not full rank')
        Q2 = Q2[:, :rankY]
        T22 = T22[:rankY, :rankY]

    # Compute canonical coefficients and canonical correlations from the SVD
    # of the cross product of the orthonormal factors.
    d = min(rankX, rankY)
    # L, D, M = svd(Q1.T @ Q2, full_matrices=False)
    L,D,M = svd(Q1.T @ Q2, full_matrices=True, check_finite=True, lapack_driver='gesdd')
    M = M.T

    # Back-substitute through the triangular factors; sqrt(n-1) scales the
    # canonical scores to unit sample variance.
    A = inv(T11) @ L[:, :d] * np.sqrt(n - 1)
    B = inv(T22) @ M[:, :d] * np.sqrt(n - 1)
    r = D[:d]
    
    # Remove roundoff errors: correlations must lie in [0, 1].
    r = np.clip(r, 0, 1)

    if not fullReturn:
        return r

    # Put coefficients back to their full size and correct (pre-pivot) order,
    # with zero rows for the dropped rank-deficient columns.
    A_full = np.zeros((p1, d))
    A_full[perm1, :] = np.vstack((A, np.zeros((p1 - rankX, d))))
    
    B_full = np.zeros((p2, d))
    B_full[perm2, :] = np.vstack((B, np.zeros((p2 - rankY, d))))

    # Compute the canonical variates (scores) for both views.
    U = X @ A_full
    V = Y @ B_full

    return A_full, B_full, r, U, V, [x_mean, y_mean, x_std, y_std]


def plot_lollipop(scores, title="Lollipop Plot of Scores", xlabel="Index", ylabel="Score", figsize=(12, 6)):
    """Draw a lollipop plot: stems from 0 plus connected markers, with each
    value annotated to two decimals.

    Note: the x tick labels show 10x the index (0, 10, 20, ...), and the
    y axis is fixed to [0, 1.1] — the plot is intended for score values in
    [0, 1] (e.g. correlations).

    Parameters:
    scores : list or numpy array
        The data to plot.
    title : str, optional
        Figure title (currently unused; kept for interface compatibility).
    xlabel : str, optional
        x-axis label.
    ylabel : str, optional
        y-axis label.
    figsize : tuple, optional
        Figure size.
    """
    # x positions, one per score
    x = np.arange(len(scores))

    fig, ax = plt.subplots(figsize=figsize)

    # Stems from the baseline up to each value.
    ax.vlines(x, 0, scores, colors='gray', lw=1, alpha=0.5)

    # Markers connected by a line.
    ax.plot(x, scores, color='black', marker='o', markersize=8, linestyle='-', linewidth=1)

    # Annotate each point with its value, two decimals, slightly above.
    for i, score in enumerate(scores):
        ax.annotate(f'{score:.2f}', (i, score), textcoords="offset points",
                    xytext=(0, 10), ha='center', va='bottom', fontsize=8)

    ax.set_xlim(-0.5, len(scores) - 0.5)

    # ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

    # Clean look: no top/right spines, no grid.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(False)

    # Tick labels are 10x the index (fixed-size data steps of 10).
    ax.set_xticks(x)
    ax.set_xticklabels([str(i * 10) for i in range(len(scores))])

    # Fixed y range for score-like data. (The original also set a
    # data-driven ylim earlier, which this line always overrode — removed.)
    ax.set_ylim(0, 1.1)

    plt.tight_layout()
    plt.show()


def progress_bar(current, total, barLength = 100):
    """Render a one-line textual progress bar, overwriting the current line.

    Parameters:
    current: completed units of work.
    total: total units of work.
    barLength: width of the bar in characters.
    """
    percent = 100.0 * current / total
    # Arrow head always present; body grows with progress.
    filled = int(percent / 100 * barLength - 1)
    arrow = '-' * filled + '>'
    spaces = ' ' * (barLength - len(arrow))

    # '\r' rewinds to line start so successive calls redraw in place.
    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Forward pass of the model (JIT-compiled).

    ``model`` (argument index 3) is marked static, so each distinct model
    object triggers its own compilation and can be used inside the traced
    function.

    Parameters:
        variables: parameter pytree passed to ``model.apply``.
        state: recurrent state (e.g. RNN/GRU hidden state).
        x: batched observation input.
        model: flax-style module providing ``apply``; must be hashable.

    Returns:
        Whatever ``model.apply(variables, state, x)`` returns — callers in
        this file unpack it as ``(new_state, output)``.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest logit in ``y``."""
    return y.argmax()

# Batched variant: maps get_action over the leading axis.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    # open json file
    with open(pth, "r") as f:
        data = json.load(f)
        landscape = data["data"]
        state = data["state"]
        goal = data["goal"]
        if display:
            print("state: ", state)
            print("goal: ", goal)
            print("landscape: ", landscape)
    return landscape, state, goal


def preprocess(trjs, max_length):
    """Pad trajectories to a common length by repeating the final element.

    Parameters:
    trjs: array of trajectories, each of shape (steps, features).
    max_length: target length; shorter trajectories are padded, trajectories
        already at (or beyond) this length are kept unchanged.

    Returns:
    numpy.ndarray stacking the length-aligned trajectories.
    """
    padded = []
    for trj in trjs:
        deficit = max_length - trj.shape[0]
        if deficit > 0:
            # Repeat the last row `deficit` times to reach max_length.
            tail = np.tile(trj[-1][np.newaxis, :], (deficit, 1))
            padded.append(np.concatenate([trj, tail], axis=0))
        else:
            padded.append(trj)
    return np.array(padded)
    
@jax.jit
def build_radiance_field(p):
    """Render a 21x21 radiance field that decays linearly with distance from
    point ``p``.

    Intensity is `top` at the point itself and falls off linearly to `bottom`
    at `effective_radius`; with radius 21*sqrt(2) every cell of the 21x21
    grid is inside the field for any in-grid point.
    """
    top = 10
    bottom = 0
    effective_radius = 21 * 1.414  # covers the whole grid diagonal

    # Row/column index maps for every pixel ('ij' = row-major, matching the
    # original repeat-based construction).
    rows, cols = jnp.meshgrid(jnp.arange(21), jnp.arange(21), indexing='ij')

    # Euclidean distance from every pixel to p.
    dist = jnp.sqrt((rows - p[0]) ** 2 + (cols - p[1]) ** 2)

    # Linear falloff inside the effective radius, zero outside.
    falloff = (top - bottom) * (effective_radius - dist) / effective_radius + bottom
    return jnp.where(dist < effective_radius, falloff, jnp.zeros((21, 21)))

@jax.jit
def get_max_radiance_field(imgs):
    """Pixel-wise maximum over a stack of radiance fields (reduces axis 0)."""
    return imgs.max(axis=0)

@jax.jit
def build_ridge(A):
    """Render a trajectory ``A`` (sequence of 2-D points) as a single "ridge"
    image: the pixel-wise max of the radiance field of every point, with the
    trajectory translated so its first point sits at (10, 10).
    """
    # Anchor the trajectory's first point to the grid centre.
    anchored = A - A[0] + jnp.array([10, 10])

    # One radiance field per trajectory point, computed in parallel.
    fields = jax.vmap(build_radiance_field)(anchored)

    # Collapse the stack into a single ridge image.
    return get_max_radiance_field(fields)

# Batched variant: one ridge image per trajectory.
build_ridge_vmap = jax.vmap(build_ridge)


def compute_trajectory_manifold():

    """Build a PCA manifold of behavioural trajectories.

    Parses CLI arguments into a ReplayConfig, loads the saved diverse
    trajectory sets for a range of sequence lengths, renders every trajectory
    into a 21x21 "ridge" image, and PCA-transforms the flattened images.

    Reads npz files from ./logs/; requires them to exist for every config.

    Returns:
        ridge_images_pca_grouped: list of per-config PCA score arrays.
        configs: the [nn_type, seq_len, redundancy, capacity] config list.
        ridge_images: flattened (pre-PCA) ridge images, one row per trajectory.
    """
    rpl_config = ReplayConfig()

    # Expose every ReplayConfig field as a CLI flag, defaulting to the
    # config's current value.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Write the (possibly overridden) values back into the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # Only 'vanilla' and 'gru' are recognized; anything else leaves
    # nn_type as the empty string (and the file names below malformed).
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    def load_data_and_compute(nn_type, seq_len, redundancy, diverse_set_capacity):
        # Load one config's saved limit rings and diverse trajectories, and
        # reduce each ring to its centre vector.

        rnn_limit_rings_file_name = "./logs/rnn_limit_rings_of_best_estimation_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        # Open the npz archive of per-task limit rings.
        rnn_limit_rings_of_best_estimation_file = np.load(rnn_limit_rings_file_name)

        # Names of the arrays stored in the archive.
        matrix_names = rnn_limit_rings_of_best_estimation_file.files

        rnn_limit_rings_of_best_estimation = []

        # Collect every stored ring matrix, in archive order.
        for name in matrix_names:
            matrix = rnn_limit_rings_of_best_estimation_file[name]
            # print(f"Matrix '{name}' shape: {matrix.shape}")
            rnn_limit_rings_of_best_estimation.append(matrix)

        # Reduce each ring to its centre (mean over the first two axes).
        rnn_limit_rings_of_best_estimation_center = []
        for i in range(len(rnn_limit_rings_of_best_estimation)):
            # Empty ring: substitute a placeholder vector.
            if rnn_limit_rings_of_best_estimation[i].shape[0] == 0:
                # NOTE(review): the original comment said "NaN vector", but
                # the code appends a length-128 ZERO vector — confirm intent.
                rnn_limit_rings_of_best_estimation_center.append(np.full((128,), 0))
            else:
                rnn_limit_rings_of_best_estimation_center.append(np.mean(rnn_limit_rings_of_best_estimation[i], axis=(0,1)))

        rnn_limit_rings_of_best_estimation_center = np.array(rnn_limit_rings_of_best_estimation_center)
        print("rnn_limit_rings_of_best_estimation_center.shape: ", rnn_limit_rings_of_best_estimation_center.shape)

        # Load the matching diverse trajectory set for this config.
        file_name = "obs_data_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        obs_file = np.load("./logs/" + file_name)
        diverse_set_trajectoies = obs_file["diverse_set_trajectoies"]
        print("diverse_set_trajectoies.shape: ", diverse_set_trajectoies.shape)

        return rnn_limit_rings_of_best_estimation_center, diverse_set_trajectoies
    
    # Each entry: [nn_type, seq_len, redundancy, diverse_set_capacity].
    configs = [

        # [nn_type, 6, 1, 100],
        # [nn_type, 7, 1, 100],
        # [nn_type, 8, 1, 100],
        # [nn_type, 9, 1, 100],
        # [nn_type, 10, 1, 100],
        # [nn_type, 11, 1, 100],
        # [nn_type, 12, 1, 100],
        # [nn_type, 13, 1, 100],
        # [nn_type, 14, 1, 100],
        # [nn_type, 15, 1, 100],

        [nn_type, 5, 1, 400],
        [nn_type, 6, 1, 400],
        [nn_type, 7, 1, 400],
        [nn_type, 8, 1, 400],
        [nn_type, 9, 1, 400],
        [nn_type, 10, 1, 400],
        [nn_type, 11, 1, 400],
        [nn_type, 12, 1, 400],
        [nn_type, 13, 1, 400],
        [nn_type, 14, 1, 400],
        
        ]

    # Gather the trajectory sets of every config (ring centres unused here).
    diverse_set_trajectoies = []
    for i in range(len(configs)):
        _, dts = load_data_and_compute(configs[i][0], configs[i][1], configs[i][2], configs[i][3])
        diverse_set_trajectoies.append(dts)

    # Pad every trajectory to the longest configured seq_len so they stack.
    diverse_set_trajectoies_mat = []
    for i in range(len(diverse_set_trajectoies)):
        length_aligned_trj = preprocess(diverse_set_trajectoies[i], configs[-1][1])
        diverse_set_trajectoies_mat.append(length_aligned_trj)
        # print("diverse_set_trajectoies_mat[i].shape: ", diverse_set_trajectoies_mat[i].shape)
    diverse_set_trajectoies_mat = np.concatenate(diverse_set_trajectoies_mat, axis=0)
    print("diverse_set_trajectoies_mat.shape: ", diverse_set_trajectoies_mat.shape)

    # Flattened (steps*coords) view of the trajectories; computed but only
    # printed — kept as a diagnostic.
    diverse_set_trajectoies_mat_linear = diverse_set_trajectoies_mat.reshape(diverse_set_trajectoies_mat.shape[0], diverse_set_trajectoies_mat.shape[1]*diverse_set_trajectoies_mat.shape[2])
    print("diverse_set_trajectoies_mat_linear.shape: ", diverse_set_trajectoies_mat_linear.shape)


    # Render each trajectory as a ridge image and PCA the images ("version2").
    """ version2: 对 ridge_images 进行 PCA
    """
    ridge_images = build_ridge_vmap(diverse_set_trajectoies_mat)
    print("RIs.shape: ", ridge_images.shape)
    # Flatten each 21x21 ridge image into a 441-vector.
    ridge_images = ridge_images.reshape(ridge_images.shape[0], ridge_images.shape[1] * ridge_images.shape[2])
    print("RIs.shape: ", ridge_images.shape)

    # PCA over the flattened ridge images (all components kept).
    pca = PCA()
    pca.fit(ridge_images)
    ridge_images_pca = pca.transform(ridge_images)

    # Split the PCA scores back into one group per config (each group has
    # `capacity` consecutive rows).
    ridge_images_pca_grouped = []
    for i in range(len(configs)):
        ridge_images_pca_grouped.append(ridge_images_pca[i*configs[i][3]:(i+1)*configs[i][3]])
        print("ridge_images_pca_grouped[i].shape: ", ridge_images_pca_grouped[i].shape)

    return copy.deepcopy(ridge_images_pca_grouped), copy.deepcopy(configs), copy.deepcopy(ridge_images)
    

def compute_policyring_manifold():

    """Build a PCA manifold of policy-ring centres and sweep it by angle.

    Loads the limit-ring centre vectors for every configured sequence length,
    PCA-transforms the concatenated centres, assigns each centre an angular
    coordinate in the first two PCs, then sweeps a sector around the circle
    collecting the per-ring-length mean centre at each angle.

    Reads npz files from ./logs/; requires them to exist for every config.

    Returns:
        centers_all: array (15, n_angles, 3) of per-length sector-mean
            centres in the first three PCs (lengths with no samples -> NaN).
        configs: the [nn_type, seq_len, redundancy, capacity] config list.
        rnn_limit_rings_of_best_estimation_center_mat: raw concatenated
            centre matrix (pre-PCA).
    """
    rpl_config = ReplayConfig()

    # Expose every ReplayConfig field as a CLI flag, defaulting to the
    # config's current value.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Write the (possibly overridden) values back into the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # Only 'vanilla' and 'gru' are recognized; anything else leaves
    # nn_type as the empty string.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    def load_data_and_compute(nn_type, seq_len, redundancy, diverse_set_capacity):
        # Load one config's saved limit rings and diverse trajectories, and
        # reduce each ring to its centre vector. (Duplicates the helper in
        # compute_trajectory_manifold.)

        rnn_limit_rings_file_name = "./logs/rnn_limit_rings_of_best_estimation_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        # Open the npz archive of per-task limit rings.
        rnn_limit_rings_of_best_estimation_file = np.load(rnn_limit_rings_file_name)

        # Names of the arrays stored in the archive.
        matrix_names = rnn_limit_rings_of_best_estimation_file.files

        rnn_limit_rings_of_best_estimation = []

        # Collect every stored ring matrix, in archive order.
        for name in matrix_names:
            matrix = rnn_limit_rings_of_best_estimation_file[name]
            # print(f"Matrix '{name}' shape: {matrix.shape}")
            rnn_limit_rings_of_best_estimation.append(matrix)

        # Reduce each ring to its centre (mean over the first two axes).
        rnn_limit_rings_of_best_estimation_center = []
        for i in range(len(rnn_limit_rings_of_best_estimation)):
            # Empty ring: substitute a placeholder vector.
            if rnn_limit_rings_of_best_estimation[i].shape[0] == 0:
                # NOTE(review): the original comment said "NaN vector", but
                # the code appends a length-128 ZERO vector — confirm intent.
                rnn_limit_rings_of_best_estimation_center.append(np.full((128,), 0))
            else:
                rnn_limit_rings_of_best_estimation_center.append(np.mean(rnn_limit_rings_of_best_estimation[i], axis=(0,1)))
        rnn_limit_rings_of_best_estimation_center = np.array(rnn_limit_rings_of_best_estimation_center)
        print("rnn_limit_rings_of_best_estimation_center.shape: ", rnn_limit_rings_of_best_estimation_center.shape)

        # Load the matching diverse trajectory set for this config.
        file_name = "obs_data_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        obs_file = np.load("./logs/" + file_name)
        diverse_set_trajectoies = obs_file["diverse_set_trajectoies"]
        print("diverse_set_trajectoies.shape: ", diverse_set_trajectoies.shape)

        return rnn_limit_rings_of_best_estimation_center, diverse_set_trajectoies
    
    # Each entry: [nn_type, seq_len, redundancy, diverse_set_capacity].
    configs = [

        # [nn_type, 6, 1, 100],
        # [nn_type, 7, 1, 100],
        # [nn_type, 8, 1, 100],
        # [nn_type, 9, 1, 100],
        # [nn_type, 10, 1, 100],
        # [nn_type, 11, 1, 100],
        # [nn_type, 12, 1, 100],
        # [nn_type, 13, 1, 100],
        # [nn_type, 14, 1, 100],
        # [nn_type, 15, 1, 100],

        [nn_type, 5, 1, 400],
        [nn_type, 6, 1, 400],
        [nn_type, 7, 1, 400],
        [nn_type, 8, 1, 400],
        [nn_type, 9, 1, 400],
        [nn_type, 10, 1, 400],
        [nn_type, 11, 1, 400],
        [nn_type, 12, 1, 400],
        [nn_type, 13, 1, 400],
        [nn_type, 14, 1, 400],
        
        ]

    # Load centres + trajectories for every config, tracking each sample's
    # ring length (seq_len) alongside its centre vector.
    rnn_limit_rings_of_best_estimation_centers = []
    rnn_limit_rings_of_best_estimation_centers_0 = []
    ring_lengths = []
    diverse_set_trajectoies = []
    for i in range(len(configs)):
        _rnn_limit_rings_of_best_estimation_centers, dts = load_data_and_compute(configs[i][0], configs[i][1], configs[i][2], configs[i][3])
        rnn_limit_rings_of_best_estimation_centers.append(_rnn_limit_rings_of_best_estimation_centers)
        if i == 2:
            # NOTE(review): snapshot of the third config's centres; not used
            # in the rest of this function — confirm whether still needed.
            rnn_limit_rings_of_best_estimation_centers_0 = _rnn_limit_rings_of_best_estimation_centers.copy()
        diverse_set_trajectoies.append(dts)
        ring_length = configs[i][1]
        data_cap = _rnn_limit_rings_of_best_estimation_centers.shape[0]
        ring_length_ = np.array([ring_length for i in range(data_cap)])
        ring_lengths.append(ring_length_)

    # One ring-length label per centre row, flattened across configs.
    ring_lengths = np.array(ring_lengths)
    ring_lengths = ring_lengths.reshape(ring_lengths.shape[0]*ring_lengths.shape[1])
    print(ring_lengths)

    # Pad all trajectories to length 15 so they stack into one array.
    diverse_set_trajectoies_mat = []
    for i in range(len(diverse_set_trajectoies)):
        length_aligned_trj = preprocess(diverse_set_trajectoies[i], 15)
        diverse_set_trajectoies_mat.append(length_aligned_trj)
        # print("diverse_set_trajectoies_mat[i].shape: ", diverse_set_trajectoies_mat[i].shape)
    diverse_set_trajectoies_mat = np.concatenate(diverse_set_trajectoies_mat, axis=0)
    print("diverse_set_trajectoies_mat.shape: ", diverse_set_trajectoies_mat.shape)


    # Concatenate the centre sequences of all configs into one matrix.
    rnn_limit_rings_of_best_estimation_center_mat = np.concatenate(rnn_limit_rings_of_best_estimation_centers, axis=0)
    print("rnn_limit_rings_of_best_estimation_center_mat.shape: ", rnn_limit_rings_of_best_estimation_center_mat.shape)

    # PCA over the concatenated centres (all components kept).
    pca = PCA()
    pca.fit(rnn_limit_rings_of_best_estimation_center_mat)
    rnn_limit_rings_of_best_estimation_center_mat_pca = pca.transform(rnn_limit_rings_of_best_estimation_center_mat)
    print("rnn_limit_rings_of_best_estimation_center_mat_pca.shape: ", rnn_limit_rings_of_best_estimation_center_mat_pca.shape)

    # Angular coordinate of each centre in the first two PCs.
    rnn_limit_rings_of_best_estimation_center_mat_pca_2d = rnn_limit_rings_of_best_estimation_center_mat_pca[:, :2]
    # Angle of each point relative to the PC origin via atan2.
    rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles = []
    for i in range(rnn_limit_rings_of_best_estimation_center_mat_pca_2d.shape[0]):
        rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles.append(np.arctan2(rnn_limit_rings_of_best_estimation_center_mat_pca_2d[i,1], rnn_limit_rings_of_best_estimation_center_mat_pca_2d[i,0]))
    rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles = np.array(rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles)
    # Shift so the minimum angle is 0 (intended range [0, 2*pi]).
    # NOTE(review): this only subtracts the minimum; the resulting range is
    # [0, max - min], not necessarily a full [0, 2*pi] — confirm intent.
    rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles = rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles - rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles.min()

    # rnn_limit_rings_of_best_estimation_center_mat_pca = pca0.transform(rnn_limit_rings_of_best_estimation_center_mat)

    # One bucket per possible ring length (indices 0..14 <-> lengths 1..15).
    centers_all = [[] for i in range(15)]


    # Assign a colour per ring length (15 reproducible random RGB triples);
    # the list itself is currently only used by commented-out code below.
    colors = []
    # Fixed seed so colours are reproducible across runs.
    np.random.seed(100)
    for i in range(15):
        colors.append(np.random.rand(3))
    
    # Sweep a sector of half-width pi/sectors through 360 one-degree steps,
    # collecting the per-length mean centre of the points inside the sector.
    sectors = 16
    for random_angle in range(0,360):

        progress_bar(random_angle, 360)

        # Convert degrees to radians (reuses the loop variable).
        random_angle *= (np.pi / 180)
        sector_angle = np.pi / sectors
        # Select every point whose angle lies within the current sector.
        idx = np.where((rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles >= random_angle - sector_angle) & (rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles <= random_angle + sector_angle))
        rnn_limit_rings_of_best_estimation_center_mat_selected = rnn_limit_rings_of_best_estimation_center_mat_pca[idx]
        seq_len_selected = ring_lengths[idx]

        # # Assign a colour to each selected point by its ring length.
        # rnn_limit_rings_of_best_estimation_center_mat_pca_colors = []
        # for i in range(rnn_limit_rings_of_best_estimation_center_mat_selected.shape[0]):
        #     rnn_limit_rings_of_best_estimation_center_mat_pca_colors.append(colors[seq_len_selected[i]-1])

        # Mean centre of the selected points, per ring length 1..15.
        # NOTE(review): `idx` is reused/shadowed here; lengths absent from the
        # selection make np.mean of an empty slice return NaN (RuntimeWarning).
        rnn_limit_rings_of_best_estimation_center_mat_selected_centers = []
        for i in range(1, 16):
            idx = np.where(seq_len_selected == i)
            rnn_limit_rings_of_best_estimation_center_mat_selected_centers.append(np.mean(rnn_limit_rings_of_best_estimation_center_mat_selected[idx], axis=0))
            
        rnn_limit_rings_of_best_estimation_center_mat_selected_centers = np.array(rnn_limit_rings_of_best_estimation_center_mat_selected_centers)

        # Keep only the first three PCA dimensions of each sector mean.
        vdim0 = 0
        vdim1 = 1
        vdim2 = 2

        for i in range(rnn_limit_rings_of_best_estimation_center_mat_selected_centers.shape[0]):
            point_3d = np.array(
                [
                    rnn_limit_rings_of_best_estimation_center_mat_selected_centers[i,vdim0], 
                    rnn_limit_rings_of_best_estimation_center_mat_selected_centers[i,vdim1], 
                    rnn_limit_rings_of_best_estimation_center_mat_selected_centers[i,vdim2]
                ]
            )
            centers_all[i].append(point_3d)

    centers_all = np.array(centers_all)
    print("centers_all.shape: ", centers_all.shape)

    return copy.deepcopy(centers_all), copy.deepcopy(configs), copy.deepcopy(rnn_limit_rings_of_best_estimation_center_mat)


if __name__ == "__main__":

    rpl_config = ReplayConfig()
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)
        action_only_model = GRU_action(hidden_dims = rpl_config.nn_size)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    if rpl_config.nn_type == "vanilla":
        W_A = tree_leaves[3]
    elif rpl_config.nn_type == "gru":
        W_A = tree_leaves[1]
    
    # 交换 W_A 的第一维和第二维
    W_A = np.swapaxes(W_A, 0, 1)
    print("W_A.shape: ", W_A.shape)

    # # 对 W_A 所有行向量进行归一化
    # W_A = W_A / np.linalg.norm(W_A, axis=1)[:, np.newaxis]
    # # 计算 W_A 矩阵中，所有行向量之间两两之间的点积
    # W_A_dot = np.abs(np.dot(W_A, W_A.T))
    # print(W_A_dot)

    # exit()

    ridge_images_pca_grouped, configs, ridge_images = compute_trajectory_manifold()

    centers_all, configs1, rnn_limit_rings_of_best_estimation_center_mat = compute_policyring_manifold()

    print("shape of ridge_images: ", ridge_images.shape)
    print("shape of rnn_limit_rings_of_best_estimation_center_mat: ", rnn_limit_rings_of_best_estimation_center_mat.shape)

    # 统计 rnn_limit_rings_of_best_estimation_center_mat 的第二个维度上，每个维度的均值，以及最大值和最小值，以及方差
    raw_mean = np.mean(rnn_limit_rings_of_best_estimation_center_mat, axis=0)
    raw_max = np.max(rnn_limit_rings_of_best_estimation_center_mat, axis=0)
    raw_min = np.min(rnn_limit_rings_of_best_estimation_center_mat, axis=0)
    raw_std = np.std(rnn_limit_rings_of_best_estimation_center_mat, axis=0)
    print("raw_mean: ", raw_mean.shape)
    print("raw_max: ", raw_max.shape)
    print("raw_min: ", raw_min.shape)
    print("raw_std: ", raw_std.shape)
    # 生成一个以 raw_mean 为中心，raw_std 为标准差的正态分布样本群，数量与 rnn_limit_rings_of_best_estimation_center_mat 第一个维度相同
    simulated_neural_data = np.random.normal(raw_mean, raw_std, size=rnn_limit_rings_of_best_estimation_center_mat.shape)
    print("simulated_neural_data: ", simulated_neural_data.shape)

    # 在 ridge_images 中加入平均噪声，分布在 [0.1, 0.2] 区间
    noise = np.random.uniform(-0.001, 0.001, size=ridge_images.shape)
    ridge_images = ridge_images + noise
    # 在 rnn_limit_rings_of_best_estimation_center_mat 中加入平均噪声，分布在 [-0.1, 0.1] 区间
    noise = np.random.uniform(-0.001, 0.001, size=rnn_limit_rings_of_best_estimation_center_mat.shape)
    rnn_limit_rings_of_best_estimation_center_mat = rnn_limit_rings_of_best_estimation_center_mat + noise

    train_view_1 = rnn_limit_rings_of_best_estimation_center_mat
    train_view_2 = ridge_images

    print("shape of train_view_1: ", train_view_1.shape)
    print("shape of train_view_2: ", train_view_2.shape)

    # 对 train_view_1/2 进行 pca
    pca = PCA()
    pca.fit(train_view_1)
    train_view_1 = pca.transform(train_view_1)
    pca1 = PCA()
    pca1.fit(train_view_2)
    train_view_2 = pca1.transform(train_view_2)

    # using CCA from article 'Preserved neural dynamics across animals performing similar behaviour'
    A, B, r, U, V, data_prop = canoncorr(train_view_1, train_view_2, fullReturn=True)
    print(A.shape, B.shape, r.shape, U.shape, V.shape)

    # print("=============== data_prop[0] =============== ", data_prop[0])
    # print("=============== data_prop[1] =============== ", data_prop[1])

    # exit()

    # # 测试还原数据
    # train_view_1_ = inverse_cca(A, U, data_prop)                # 首先 inverse CCA
    # rnn_data = pca.inverse_transform(train_view_1_)             # 然后 inverse PCA
    # print("shape of rnn_data: ", rnn_data.shape)
    # print("shape of rnn_limit_rings_of_best_estimation_center_mat: ", rnn_limit_rings_of_best_estimation_center_mat.shape)
    # # 打印 rnn_limit_rings_of_best_estimation_center_mat 与 rnn_data 的差值的绝对值总和
    # print("sum of abs diff: ", np.sum(np.abs(rnn_limit_rings_of_best_estimation_center_mat - rnn_data)))

    transformd_cca_representations = U.copy()
    print("transformd_representations.shape: ", transformd_cca_representations.shape)

    transformd_pca_representations = pca.transform(rnn_limit_rings_of_best_estimation_center_mat)


    task_selected = "./logs/rf_pass_task_server.txt"
    dir_path = "./logs/task_envs_gru_server/"
    # Each line of the selection file names one task; prefix every name with
    # the directory that holds the serialized task environments.
    with open(task_selected, "r") as f:
        task_list = [dir_path + line.strip() for line in f.readlines()]

    landscapes, states, goals = [], [], []
    num_tasks = len(task_list)
    for idx, task_pth in enumerate(task_list):
        progress_bar(idx, num_tasks)
        landscape, state, goal = load_task(task_pth)
        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    # Stack the per-task states and goals into device arrays.
    states = jnp.array(states)
    goals = jnp.array(goals)

    print("shape of states: ", states.shape)
    print("shape of goals: ", goals.shape)

    """ create grid env
    """
    # Time the batched environment construction.
    start_time = time.time()

    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=False)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    # Keep copies of the injected initial states/goals for later analysis.
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    # Build the first observation from the freshly injected states/goals.
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs

    # Fresh RNN hidden state, one per environment.
    rnn_state = model.initial_state(GE.num_envs)
    step_count = 0
    # Per-step recordings filled in by the rollout loop below.
    trajectories = []
    goal_record = []
    actions = []

    rnn_trajectories = []

    # Roll the trained model out for life_duration steps, recording per-step
    # environment states, goal hits, actions and RNN hidden states.
    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        step_count += 1

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

        trajectories.append(np.array(GE.batched_states))
        goal_record.append(batched_goal_reached)
        actions.append(np.array(batched_actions))
        rnn_trajectories.append(np.array(rnn_state))

    print("shape of trajectories: ", np.array(trajectories).shape)
    print("shape of goal_record: ", np.array(goal_record).shape)
    print("shape of rnn_state: ", rnn_state.shape)

    # Reorder from (time, env, ...) to (env, time, ...).
    trajectories = np.transpose(np.array(trajectories), (1, 0, 2))
    print("shape of trajectories: ", trajectories.shape)
    
    # switch the dimension1 and dimension2 of goal_record
    goal_record = np.array(goal_record).T
    print("shape of goal_record: ", goal_record.shape)
    actions = np.array(actions).T
    print("shape of actions: ", actions.shape)

    # Same (env, time, hidden) reordering for the recorded RNN states.
    rnn_trajectories = np.transpose(np.array(rnn_trajectories), (1, 0, 2))
    print("shape of rnn_trajectories: ", rnn_trajectories.shape)
    print("memory usage of rnn_trajectories in GiB: ", rnn_trajectories.nbytes / 1024**3)

    start_positions = GE.init_batched_states
    goal_positions = GE.init_batched_goals

    print("shape of starts: ", start_positions.shape)
    print("shape of goals: ", goal_positions.shape)

    ''' extract final trajectories
    '''
    # Time-reversed copies of goal_record / trajectories so the LAST completed
    # trial of each environment can be found by scanning forward.
    goal_reached_reverse = goal_record[:, ::-1]
    trajectories_reverse = trajectories[:, ::-1]
    rnn_trajectories_reverse = rnn_trajectories[:, ::-1]
    final_traj_list = []
    final_rnn_trajectories_list = []

    print("shape of goal_reached_reverse: ", goal_reached_reverse.shape)
    print("shape of trajectories_reverse: ", trajectories_reverse.shape)

    # For every environment, extract the last complete goal-to-goal cycle.
    print("extracting final trajectories...")
    for i in range(trajectories_reverse.shape[0]):
        progress_bar(i, trajectories_reverse.shape[0])
        # Find the segment from the last goal hit (inclusive) back to the step
        # after the previous goal hit (inclusive), in reversed time.
        # NOTE(review): assumes every environment reached the goal at least
        # twice; with fewer hits the slice is empty and final_traj[0] below
        # would raise — confirm upstream guarantees this.
        last_goal = 0
        second_last_goal = 0
        for j in range(goal_reached_reverse.shape[1]):
            if goal_reached_reverse[i,j] == True:
                last_goal = j
                break
        for j in range(last_goal+1, goal_reached_reverse.shape[1]):
            if goal_reached_reverse[i,j] == True:
                second_last_goal = j
                break
        # Extract the final trajectory and re-anchor it at its start position.
        final_traj_reverse = trajectories_reverse[i,last_goal:second_last_goal]
        final_traj = final_traj_reverse[::-1]
        final_traj_list.append(final_traj - final_traj[0])

        # Extract the matching final RNN-state trajectory (no re-anchoring).
        final_rnn_traj_reverse = rnn_trajectories_reverse[i,last_goal:second_last_goal]
        final_rnn_traj = final_rnn_traj_reverse[::-1]
        final_rnn_trajectories_list.append(final_rnn_traj)

    print("len of final_rnn_trajectories_list: ", len(final_rnn_trajectories_list))

    # The last RNN state of each final trajectory is the "stable state"
    # reached once the behaviour converged.
    stable_states = []
    for i in range(len(final_rnn_trajectories_list)):
        stable_states.append(final_rnn_trajectories_list[i][-1])
    stable_states = np.array(stable_states)
    print("shape of stable_states: ", stable_states.shape)

    print("----------------- len of final_traj_list: ", len(final_traj_list))

    def find_stable_position(record, var_threshold=0.5, optimization_scale=2,
                             max_trial_len=20, not_found=1000):
        """Return the time step at which the trial lengths have stabilised.

        `record` is a 1-D boolean array where True marks the step at which a
        goal was reached; the gap between consecutive True entries is one
        "trial".  An exponential moving average of the change in trial length
        (0.9 * |delta| + 0.1 * previous EMA) tracks how much successive trials
        still vary.

        Stability is declared at the first goal index where all of:
          * the trial-length EMA variation is <= var_threshold,
          * the first trial was at least optimization_scale times longer than
            the current one (the agent actually got faster),
          * the current trial takes at most max_trial_len steps.

        Returns `not_found` (default 1000, kept so existing callers that
        compare against the literal 1000 keep working) when no step satisfies
        all three conditions.
        """
        trial_len = 0
        trial_len_old = 0
        step_ = 0
        trial_len_var = 0.0
        first_trial_len = -1  # -1 means "no goal seen yet"
        for i in range(record.shape[0]):
            step_ += 1
            if record[i]:
                if first_trial_len == -1:
                    first_trial_len = step_
                trial_len = step_
                step_ = 0
                # EMA of the absolute change between consecutive trial lengths.
                trial_len_var = 0.9 * abs(trial_len - trial_len_old) + 0.1 * trial_len_var
                trial_len_old = trial_len

                if (trial_len_var <= var_threshold
                        and first_trial_len / trial_len >= optimization_scale
                        and trial_len <= max_trial_len):
                    return i

        return not_found
    
    # For each environment, find the step where behaviour stabilises and keep
    # only the environments that actually converged (sentinel 1000 = never).
    num_records = goal_record.shape[0]
    stable_positions = []
    for row_idx in range(num_records):
        progress_bar(row_idx, num_records)
        pos = find_stable_position(goal_record[row_idx])
        if pos != 1000:
            stable_positions.append(pos)
    stable_positions = np.array(stable_positions)

    # Histogram of the convergence steps.
    plt.hist(stable_positions, bins=50)
    plt.show()

    # Record the outcome of every dimension-truncation scheme.
    stable_positions_hists = []
    error_eucledian_hists = []

    # Sweep the number of retained CCA dimensions.
    for n_dims in range(3,20):

        ''' 在特征空间进行操作，然后将 stable_states 恢复到原始空间
        '''
        # Edit each stable state in feature space, then map it back:
        # PCA -> standardize with data_prop mean/scale -> CCA, zero the
        # trailing dimensions, then invert CCA and PCA.
        # n_dims = 3
        stable_states_altered = stable_states.copy()
        for i in range(stable_states.shape[0]):
            progress_bar(i, stable_states.shape[0])
            stable_state = stable_states[i]
            state_transformed = pca.transform(stable_state.reshape(1, -1))   # PCA transform
            state_transformed[0] = (state_transformed[0] - data_prop[0]) / data_prop[2]
            state_transformed[0] = transform_cca(state_transformed[0], A)                   # CCA transform

            ''' 仅保留前 n_dims 个维度的信息
            '''
            # Keep only the first n_dims CCA dimensions.
            state_transformed[0, n_dims:] = 0
            
            ''' 仅删除前 n_dims 个维度的信息
            '''
            # Alternative (disabled): delete the first n_dims dimensions instead.
            # state_transformed[0, 0:n_dims] = 0

            # Inverse-transform back to the original space (CCA, then PCA).
            state_pca = inverse_cca(A, state_transformed, data_prop)
            stable_states_altered[i] = pca.inverse_transform(state_pca)[0]
        

        ''' 重新运行
        '''
        # Re-run the environments starting from the altered RNN states.

        GE.reset()
        # set states of GE
        GE.batched_states = states.copy()
        # set goals of GE
        GE.batched_goals = goals.copy()
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
        concat_obs = GE.concat_obs

        # Start from the truncated stable states, not a fresh initial state.
        rnn_state = jnp.array(stable_states_altered)
        # rnn_state = model.initial_state(GE.num_envs)

        step_count = 0
        trajectories = []
        goal_record = []
        actions = []

        rnn_trajectories = []

        # Landscape id used only by the (disabled) rendering below.
        ls_id = 7284

        for t in range(rpl_config.life_duration):

            progress_bar(t, rpl_config.life_duration)

            step_count += 1

            """ model forward and step the env
            """
            rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
            batched_actions = get_action_vmap(y1)
            batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

            trajectories.append(np.array(GE.batched_states))
            goal_record.append(batched_goal_reached)

            # img = GE.render(ls_id)
            # cv2.imshow("img", img)
            # cv2.waitKey(1)

        print("shape of trajectories: ", np.array(trajectories).shape)
        print("shape of goal_record: ", np.array(goal_record).shape)
        print("shape of rnn_state: ", rnn_state.shape)

        # Reorder from (time, env, ...) to (env, time, ...).
        trajectories = np.transpose(np.array(trajectories), (1, 0, 2))
        print("shape of trajectories: ", trajectories.shape)

        # Align every trajectory to its own start position.
        for i in range(trajectories.shape[0]):
            progress_bar(i, trajectories.shape[0])
            trajectories[i] = trajectories[i] - trajectories[i,0]
        print("shape of trajectories: ", trajectories.shape)
        
        # switch the dimension1 and dimension2 of goal_record
        goal_record = np.array(goal_record).T
        print("shape of goal_record: ", goal_record.shape)

        # Convergence step per environment; drop the never-converged sentinel.
        stable_positions = []
        for i in range(goal_record.shape[0]):
            progress_bar(i, goal_record.shape[0])
            stable_position = find_stable_position(goal_record[i])
            if stable_position!= 1000:
                stable_positions.append(stable_position)
        stable_positions = np.array(stable_positions)

        # # Histogram of stable_positions (disabled).
        # plt.hist(stable_positions, bins=50)
        # plt.show()

        # Store the histogram as an array via numpy instead of plotting it.
        # Define the bin edges: 51 points over [0, 1000] define 50 bins.
        bin_edges = np.linspace(0, 1000, 51)  # 51 points define 50 bins
        # Count with np.histogram.
        stable_positions_hist, _ = np.histogram(stable_positions, bins=bin_edges)
        stable_positions_hists.append(stable_positions_hist)

        ''' 现在计算重置之后的第一轮 trial 中前 N 步的物理轨迹匹配度（例如 Fréchet distance）
        '''
        # Compare the first N steps of the re-run trajectory against the
        # original final trajectory of the same environment (Euclidean error).
        error_eucledian = []
        for i in range(len(final_traj_list)):
            progress_bar(i, trajectories.shape[0])
            
            l_ = final_traj_list[i].shape[0]
            new_trj = trajectories[i, :l_]

            diff = final_traj_list[i] - new_trj
            # print("diff.shape: ", diff.shape)

            # err_sum_hammimg = np.sum(np.mean(np.abs(diff), axis=0))
            err_sum_eucledian = np.linalg.norm(np.mean(np.abs(diff), axis=0))

            error_eucledian.append(err_sum_eucledian)

        error_eucledian = np.array(error_eucledian)
        # # Histogram of error_eucledian (disabled).
        # plt.hist(error_eucledian.flatten(), bins=50)
        # plt.show()

        # Store the error histogram: 51 points over [0, 10] define 50 bins.
        bin_edges = np.linspace(0, 10, 51)  # 51 points define 50 bins
        error_eucledian_hist, _ = np.histogram(error_eucledian.flatten(), bins=bin_edges)
        error_eucledian_hists.append(error_eucledian_hist)

    # # Plot all stable-position histograms as labelled curves (disabled).
    # plt.figure()
    # for i in range(len(stable_positions_hists)):
    #     plt.plot(stable_positions_hists[i][1][:-1], stable_positions_hists[i][0], label="n_dims = " + str(i+3))
    # plt.legend()
    # plt.show()

    # # Plot all Euclidean-error histograms as labelled curves (disabled).
    # plt.figure()
    # for i in range(len(error_eucledian_hists)):
    #     plt.plot(error_eucledian_hists[i][1][:-1], error_eucledian_hists[i][0], label="n_dims = " + str(i+3))
    # plt.legend()
    # plt.show()
        

    stable_positions_hists = np.array(stable_positions_hists)
    error_eucledian_hists = np.array(error_eucledian_hists)

    # Write the interactive stable-position histogram curves to an HTML file.
    output_file("interactive_histograms1.html")

    num_curves = stable_positions_hists.shape[0]
    num_bins = 50
    data = stable_positions_hists
    names = [f'曲线 {i+3}' for i in range(num_curves)]
    # Build exactly num_curves colors (cycling the 20-color palette):
    # every ColumnDataSource column must have the same length, otherwise
    # bokeh warns and the extra palette entries misalign the glyphs.
    colors = [Category20[20][i % 20] for i in range(num_curves)]

    # Map bin indices onto the real value range of the histogram (0..1000).
    custom_x_labels = np.linspace(0, 1000, num_bins)

    # One row per curve: shared x grid, per-curve counts, label and color.
    source = ColumnDataSource(data=dict(
        xs = [custom_x_labels.tolist()] * num_curves,
        ys = data.tolist(),
        names = names,
        line_color=colors
    ))

    # Figure stretches to the full page width.
    p = figure(title="交互式直方图曲线",
            tools="pan,wheel_zoom,box_zoom,reset,save",
            sizing_mode="stretch_width",
            height=600)

    # Draw all curves at once; hovered curve is highlighted in red.
    renderer = p.multi_line(xs='xs', ys='ys', line_color='line_color', source=source, line_width=2, alpha=0.6,
                            hover_line_color='red',
                            hover_line_alpha=1.0, hover_line_width=4)

    # Hover tooltip shows the curve name (i.e. which n_dims produced it).
    hover = HoverTool(
        tooltips=[('曲线名称', '@names')],
        renderers=[renderer],
        line_policy='nearest',
        show_arrow=False,
        mode='mouse'
    )
    p.add_tools(hover)

    # Clamp the x axis to the custom label range.
    p.x_range.start = custom_x_labels[0]
    p.x_range.end = custom_x_labels[-1]

    # Render the plot (opens the HTML file).
    show(p)


    # Write the interactive Euclidean-error histogram curves to an HTML file.
    output_file("interactive_histograms2.html")

    num_curves = error_eucledian_hists.shape[0]
    num_bins = 50
    data = error_eucledian_hists
    names = [f'曲线 {i+3}' for i in range(num_curves)]
    # Build exactly num_curves colors (cycling the 20-color palette):
    # every ColumnDataSource column must have the same length, otherwise
    # bokeh warns and the extra palette entries misalign the glyphs.
    colors = [Category20[20][i % 20] for i in range(num_curves)]

    # Map bin indices onto the real value range of the histogram (0..10).
    custom_x_labels = np.linspace(0, 10, num_bins)

    # One row per curve: shared x grid, per-curve counts, label and color.
    source = ColumnDataSource(data=dict(
        xs = [custom_x_labels.tolist()] * num_curves,
        ys = data.tolist(),
        names = names,
        line_color=colors
    ))

    # Figure stretches to the full page width.
    p = figure(title="交互式直方图曲线",
            tools="pan,wheel_zoom,box_zoom,reset,save",
            sizing_mode="stretch_width",
            height=600)

    # Draw all curves at once; hovered curve is highlighted in red.
    renderer = p.multi_line(xs='xs', ys='ys', line_color='line_color', source=source, line_width=2, alpha=0.6,
                            hover_line_color='red',
                            hover_line_alpha=1.0, hover_line_width=4)

    # Hover tooltip shows the curve name (i.e. which n_dims produced it).
    hover = HoverTool(
        tooltips=[('曲线名称', '@names')],
        renderers=[renderer],
        line_policy='nearest',
        show_arrow=False,
        mode='mouse'
    )
    p.add_tools(hover)

    # Clamp the x axis to the custom label range.
    p.x_range.start = custom_x_labels[0]
    p.x_range.end = custom_x_labels[-1]

    # Render the plot (opens the HTML file).
    show(p)



