from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from flax import linen as nn

from jax import tree_util

# from cca_zoo.linear import CCA, rCCA
# from cca_zoo.model_selection import GridSearchCV
# from cca_zoo.deep import (
#     DCCA,
#     DCCA_EY,
#     DCCA_NOI,
#     DCCA_SDL,
#     BarlowTwins,
#     VICReg,
#     architectures,
# )
# from cca_zoo.visualisation import (
#     ScoreScatterDisplay,
#     UMAPScoreDisplay,
#     TSNEScoreDisplay,
# )
# import seaborn as sns
import logging
import matplotlib.pyplot as plt
# import seaborn as sns
import pandas as pd

import numpy as np
from scipy.linalg import qr, svd, inv
import logging

from scipy.spatial.distance import pdist, squareform
from scipy.stats import pearsonr

def canoncorr(X0: np.ndarray, Y0: np.ndarray, fullReturn: bool = False) -> np.ndarray:
    """
    Canonical Correlation Analysis (CCA) with added diagnostics and preprocessing.

    Follows the classic QR + SVD formulation (as in MATLAB's `canoncorr`):
    both views are z-scored, factored with pivoted economy QR, rank-reduced,
    and the canonical structure is read off the SVD of Q1.T @ Q2.

    Parameters:
    X0, Y0: (samples/observations) x (features) matrices; must share the same
        number of rows (samples).
    fullReturn: whether all outputs should be returned or just `r` be returned

    Returns: A, B, r, U, V (if fullReturn is True) or just r (if fullReturn is False)
    A, B: Canonical coefficients for X and Y (features x modes), zero-padded
        and permuted back to the original column order
    U, V: Canonical scores (projections of the standardized X and Y)
    r: Canonical correlations, clipped into [0, 1]
    """
    n, p1 = X0.shape
    p2 = Y0.shape[1]
    
    # Data diagnostics
    print(f"X shape: {X0.shape}, Y shape: {Y0.shape}")
    print(f"X condition number: {np.linalg.cond(X0)}")
    print(f"Y condition number: {np.linalg.cond(Y0)}")
    
    if p1 >= n or p2 >= n:
        logging.warning('Not enough samples, might cause problems')

    # Preprocessing: Standardize the variables (z-score each column).
    # NOTE(review): a constant column (std == 0) would produce inf/nan here — confirm inputs.
    X = (X0 - np.mean(X0, 0)) / np.std(X0, 0)
    Y = (Y0 - np.mean(Y0, 0)) / np.std(Y0, 0)
        
    # X = (X0 - np.mean(X0, 0))
    # Y = (Y0 - np.mean(Y0, 0))

    # Factor the inputs, and find a full rank set of columns if necessary
    Q1, T11, perm1 = qr(X, mode='economic', pivoting=True)
    Q2, T22, perm2 = qr(Y, mode='economic', pivoting=True)

    # Determine ranks with a more stringent threshold: compare each pivoted
    # R-diagonal entry against the largest one, scaled by `tol`.
    tol = np.finfo(float).eps * 100  # Increased tolerance
    rankX = np.sum(np.abs(np.diagonal(T11)) > tol * np.abs(T11[0, 0]))
    rankY = np.sum(np.abs(np.diagonal(T22)) > tol * np.abs(T22[0, 0]))

    print(f"Rank of X: {rankX}, Rank of Y: {rankY}")

    if rankX == 0:
        raise ValueError('X has zero rank')
    elif rankX < p1:
        # Drop the linearly dependent columns identified by the pivoted QR.
        logging.warning('X is not full rank')
        Q1 = Q1[:, :rankX]
        T11 = T11[:rankX, :rankX]

    if rankY == 0:
        raise ValueError('Y has zero rank')
    elif rankY < p2:
        # Drop the linearly dependent columns identified by the pivoted QR.
        logging.warning('Y is not full rank')
        Q2 = Q2[:, :rankY]
        T22 = T22[:rankY, :rankY]

    # Compute canonical coefficients and canonical correlations
    d = min(rankX, rankY)
    # L, D, M = svd(Q1.T @ Q2, full_matrices=False)
    L,D,M = svd(Q1.T @ Q2, full_matrices=True, check_finite=True, lapack_driver='gesdd')
    M = M.T

    # Scale by sqrt(n - 1) so the canonical variates have unit sample variance.
    A = inv(T11) @ L[:, :d] * np.sqrt(n - 1)
    B = inv(T22) @ M[:, :d] * np.sqrt(n - 1)
    r = D[:d]
    
    # Remove roundoff errors
    r = np.clip(r, 0, 1)

    if not fullReturn:
        return r

    # Put coefficients back to their full size and correct order:
    # zero-pad the rank-deficient rows, then undo the QR column permutation.
    A_full = np.zeros((p1, d))
    A_full[perm1, :] = np.vstack((A, np.zeros((p1 - rankX, d))))
    
    B_full = np.zeros((p2, d))
    B_full[perm2, :] = np.vstack((B, np.zeros((p2 - rankY, d))))

    # Compute the canonical variates (projections of the standardized data)
    U = X @ A_full
    V = Y @ B_full

    # U = X0 @ A_full
    # V = Y0 @ B_full

    return A_full, B_full, r, U, V


def plot_lollipop(scores, title="Lollipop Plot of Scores", xlabel="Index", ylabel="Score", figsize=(12, 6)):
    """
    Draw a lollipop plot with a connecting line and two-decimal value labels.

    X tick labels start at 1, and the y axis is fixed to [0, 1.1], which suits
    correlation-like scores.

    Parameters
    ----------
    scores : list or numpy array
        Values to plot (expected to lie in [0, 1]).
    title : str, optional
        Figure title.
    xlabel : str, optional
        X-axis label.
    ylabel : str, optional
        Y-axis label.
    figsize : tuple, optional
        Figure size.
    """
    # One x position per score
    x = np.arange(len(scores))

    fig, ax = plt.subplots(figsize=figsize)

    # Vertical "sticks" of the lollipops
    ax.vlines(x, 0, scores, colors='gray', lw=1, alpha=0.5)

    # Markers connected by a line
    ax.plot(x, scores, color='black', marker='o', markersize=8, linestyle='-', linewidth=1)

    # Annotate each value with two decimals, slightly above its marker
    for i, score in enumerate(scores):
        ax.annotate(f'{score:.2f}', (i, score), textcoords="offset points", 
                    xytext=(0,10), ha='center', va='bottom', fontsize=8)

    # Axis limits. (The original also set a data-dependent upper y limit here,
    # but it was immediately overridden by the fixed 0-1.1 range below and
    # crashed on empty input, so that dead call has been removed.)
    ax.set_xlim(-0.5, len(scores) - 0.5)
    ax.set_ylim(0, 1.1)

    # Title and axis labels
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

    # Remove the top/right spines and grid lines for a cleaner look
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.grid(False)

    # X ticks labelled starting from 1
    ax.set_xticks(x)
    ax.set_xticklabels(range(1, len(scores) + 1))

    plt.tight_layout()
    plt.show()


def progress_bar(current, total, barLength = 100):
    """Render a single-line, in-place textual progress bar on stdout.

    Prints ``Progress: [---->    ] NN %`` terminated by a carriage return so
    successive calls overwrite the same console line.

    Parameters:
    current: work completed so far
    total: total amount of work
    barLength: width of the bar in characters
    """
    percent = float(current) * 100 / total
    # Filled portion ends with a '>' head; the remainder is blank padding.
    head = '-' * int(percent / 100 * barLength - 1) + '>'
    tail = ' ' * (barLength - len(head))
    print('Progress: [%s%s] %d %%' % (head, tail, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """ forward pass of the model

    Jit-compiled wrapper around `model.apply`. `model` is a static argument,
    so each distinct model object triggers its own compilation.

    variables: model parameter pytree passed to `model.apply`
    state: recurrent carry / extra state forwarded to the model
    x: input batch
    model: the flax module to apply (static for jit)

    Returns whatever `model.apply(variables, state, x)` returns
    (NOTE(review): presumably (new_state, output) for the recurrent models
    used in this project — confirm against the model definition).
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: the index of the largest entry of `y`."""
    return y.argmax()

# Batched greedy selection over a leading batch axis.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Load a task description from a JSON log file.

    Parameters:
    pth: path to the JSON file; must contain "data", "state" and "goal" keys
    display: when True, echo the loaded fields to stdout

    Returns:
    (landscape, state, goal) as stored in the file.
    """
    with open(pth, "r") as f:
        data = json.load(f)
    landscape = data["data"]
    state = data["state"]
    goal = data["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal


def preprocess(trjs, max_length):
    """Pad (or truncate) every trajectory to exactly `max_length` steps.

    Short trajectories are padded by repeating their last element; trajectories
    longer than `max_length` are truncated. (The original implementation left
    over-long trajectories untouched, which produced a ragged list that
    `np.array` cannot stack into a rectangular array.)

    Parameters:
    trjs: indexable collection (e.g. object ndarray) of (T_i, D) trajectories,
        where T_i may differ per trajectory
    max_length: target number of time steps

    Returns:
    np.ndarray of shape (len(trjs), max_length, D)
    """
    processed_trjs = []
    for i in range(trjs.shape[0]):
        trj = trjs[i]
        if trj.shape[0] < max_length:
            # Repeat the final state to fill the remaining steps.
            pad = np.repeat(trj[-1][np.newaxis, :], max_length - trj.shape[0], axis=0)
            processed_trjs.append(np.concatenate([trj, pad], axis=0))
        else:
            # Truncate over-long trajectories so the stacked result is rectangular.
            processed_trjs.append(trj[:max_length])
    return np.array(processed_trjs)
    
@jax.jit
def build_radiance_field(p):
    """Render a 21x21 radiance image centred on the point p = (row, col).

    Intensity decays linearly with Euclidean distance from `p`, from `top` at
    distance 0 down toward `bottom` at `effective_radius`. Because the
    effective radius (21*1.414, i.e. roughly the image diagonal) exceeds any
    in-image distance, every pixel receives a positive value.
    """
    top, bottom = 10, 0
    effective_radius = 21 * 1.414

    # Integer coordinate grids: rows[i, j] == i, cols[i, j] == j.
    rows, cols = jnp.meshgrid(jnp.arange(21), jnp.arange(21), indexing='ij')

    dist = jnp.sqrt((rows - p[0]) ** 2 + (cols - p[1]) ** 2)
    ramp = (top - bottom) * (effective_radius - dist) / effective_radius + bottom

    # Pixels beyond the effective radius stay at zero (never triggered here).
    return jnp.where(dist < effective_radius, ramp, jnp.zeros((21, 21)))

@jax.jit
def get_max_radiance_field(imgs):
    """Pixel-wise maximum over a stack of radiance images (leading axis)."""
    return imgs.max(axis=0)

@jax.jit
def build_ridge(A):
    """Build a composite "ridge" image for a trajectory `A` of 2-D points.

    The trajectory is translated so its first point sits at the image centre
    (10, 10); a radiance field is rendered for every point and the fields are
    merged with a pixel-wise maximum.
    """
    # Anchor the trajectory: shift it so A[0] maps onto (10, 10).
    anchored = A - A[0] + jnp.array([10, 10])

    # Render one radiance field per trajectory point, vectorised over points.
    fields = jax.vmap(build_radiance_field)(anchored)
    return get_max_radiance_field(fields)

# Batched version: one ridge image per trajectory in a stack.
build_ridge_vmap = jax.vmap(build_ridge)


def compute_trajectory_manifold():

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    def load_data_and_compute(nn_type, seq_len, redundancy, diverse_set_capacity):

        rnn_limit_rings_file_name = "./logs/rnn_limit_rings_of_best_estimation_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        # 载入 npz 文件
        rnn_limit_rings_of_best_estimation_file = np.load(rnn_limit_rings_file_name)

        # 获取 npz 文件中的所有对象名称
        matrix_names = rnn_limit_rings_of_best_estimation_file.files

        rnn_limit_rings_of_best_estimation = []

        # 遍历对象名称，访问和操作每个矩阵对象
        for name in matrix_names:
            matrix = rnn_limit_rings_of_best_estimation_file[name]
            # 在这里进行对矩阵对象的操作
            # 例如，打印矩阵的形状
            # print(f"Matrix '{name}' shape: {matrix.shape}")
            rnn_limit_rings_of_best_estimation.append(matrix)

        # 求 rnn_limit_rings_of_best_estimation 的中心位置序列
        rnn_limit_rings_of_best_estimation_center = []
        for i in range(len(rnn_limit_rings_of_best_estimation)):
            # 判断 rnn_limit_rings_of_best_estimation[i] 是否为空
            if rnn_limit_rings_of_best_estimation[i].shape[0] == 0:
                # 加入一个 128 的 nan 向量
                rnn_limit_rings_of_best_estimation_center.append(np.full((128,), 0))
            else:
                rnn_limit_rings_of_best_estimation_center.append(np.mean(rnn_limit_rings_of_best_estimation[i], axis=(0,1)))

        rnn_limit_rings_of_best_estimation_center = np.array(rnn_limit_rings_of_best_estimation_center)
        print("rnn_limit_rings_of_best_estimation_center.shape: ", rnn_limit_rings_of_best_estimation_center.shape)

        file_name = "obs_data_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        obs_file = np.load("./logs/" + file_name)
        diverse_set_trajectoies = obs_file["diverse_set_trajectoies"]
        print("diverse_set_trajectoies.shape: ", diverse_set_trajectoies.shape)

        return rnn_limit_rings_of_best_estimation_center, diverse_set_trajectoies
    
    configs = [

        # [nn_type, 6, 1, 100],
        # [nn_type, 7, 1, 100],
        # [nn_type, 8, 1, 100],
        # [nn_type, 9, 1, 100],
        # [nn_type, 10, 1, 100],
        # [nn_type, 11, 1, 100],
        # [nn_type, 12, 1, 100],
        # [nn_type, 13, 1, 100],
        # [nn_type, 14, 1, 100],
        # [nn_type, 15, 1, 100],

        [nn_type, 5, 1, 400],
        [nn_type, 6, 1, 400],
        [nn_type, 7, 1, 400],
        [nn_type, 8, 1, 400],
        [nn_type, 9, 1, 400],
        [nn_type, 10, 1, 400],
        [nn_type, 11, 1, 400],
        [nn_type, 12, 1, 400],
        [nn_type, 13, 1, 400],
        [nn_type, 14, 1, 400],
        
        ]

    diverse_set_trajectoies = []
    for i in range(len(configs)):
        _, dts = load_data_and_compute(configs[i][0], configs[i][1], configs[i][2], configs[i][3])
        diverse_set_trajectoies.append(dts)

    diverse_set_trajectoies_mat = []
    for i in range(len(diverse_set_trajectoies)):
        length_aligned_trj = preprocess(diverse_set_trajectoies[i], configs[-1][1])
        diverse_set_trajectoies_mat.append(length_aligned_trj)
        # print("diverse_set_trajectoies_mat[i].shape: ", diverse_set_trajectoies_mat[i].shape)
    diverse_set_trajectoies_mat = np.concatenate(diverse_set_trajectoies_mat, axis=0)
    print("diverse_set_trajectoies_mat.shape: ", diverse_set_trajectoies_mat.shape)

    diverse_set_trajectoies_mat_linear = diverse_set_trajectoies_mat.reshape(diverse_set_trajectoies_mat.shape[0], diverse_set_trajectoies_mat.shape[1]*diverse_set_trajectoies_mat.shape[2])
    print("diverse_set_trajectoies_mat_linear.shape: ", diverse_set_trajectoies_mat_linear.shape)


    """ version2: 对 ridge_images 进行 PCA
    """
    ridge_images = build_ridge_vmap(diverse_set_trajectoies_mat)
    print("RIs.shape: ", ridge_images.shape)
    # 将 RIs 从 (100, 21, 21) 转换成 (100, 441)
    ridge_images = ridge_images.reshape(ridge_images.shape[0], ridge_images.shape[1] * ridge_images.shape[2])
    print("RIs.shape: ", ridge_images.shape)

    # 对 ridge_images 进行 PCA
    pca = PCA()
    pca.fit(ridge_images)
    ridge_images_pca = pca.transform(ridge_images)

    ridge_images_pca_grouped = []
    for i in range(len(configs)):
        ridge_images_pca_grouped.append(ridge_images_pca[i*configs[i][3]:(i+1)*configs[i][3]])
        print("ridge_images_pca_grouped[i].shape: ", ridge_images_pca_grouped[i].shape)

    return copy.deepcopy(ridge_images_pca_grouped), copy.deepcopy(configs), copy.deepcopy(ridge_images)
    

def compute_policyring_manifold():

    """Build the policy-ring manifold.

    Re-parses the replay CLI arguments, loads the RNN limit-ring centres for
    every entry in `configs`, PCA-projects the concatenated centres, then
    sweeps an angular window over the first two PCA components and collects
    the per-seq_len mean centre inside each window.

    Returns (centers_all, configs, rnn_limit_rings_of_best_estimation_center_mat).
    """
    rpl_config = ReplayConfig()

    # Expose every ReplayConfig field as a CLI flag, defaulting to the config value.
    # NOTE(review): parse_args() is also called in __main__ and in
    # compute_trajectory_manifold; all three parses see the same argv.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Copy the parsed arguments back into the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # Only "vanilla" and "gru" are recognised; anything else leaves nn_type empty.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    def load_data_and_compute(nn_type, seq_len, redundancy, diverse_set_capacity):
        # Load the per-config limit-ring archive and diverse trajectory set.
        # NOTE(review): duplicated verbatim in compute_trajectory_manifold —
        # candidate for extraction to module level.

        rnn_limit_rings_file_name = "./logs/rnn_limit_rings_of_best_estimation_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        # Load the npz archive
        rnn_limit_rings_of_best_estimation_file = np.load(rnn_limit_rings_file_name)

        # Names of all arrays stored in the npz file
        matrix_names = rnn_limit_rings_of_best_estimation_file.files

        rnn_limit_rings_of_best_estimation = []

        # Collect every stored matrix, in file order.
        for name in matrix_names:
            matrix = rnn_limit_rings_of_best_estimation_file[name]
            # Per-matrix processing could go here, e.g. printing its shape:
            # print(f"Matrix '{name}' shape: {matrix.shape}")
            rnn_limit_rings_of_best_estimation.append(matrix)

        # Centre of each limit ring: the mean over its first two axes.
        rnn_limit_rings_of_best_estimation_center = []
        for i in range(len(rnn_limit_rings_of_best_estimation)):
            # Empty ring?
            if rnn_limit_rings_of_best_estimation[i].shape[0] == 0:
                # Append a 128-dim zero vector as a placeholder.
                # NOTE(review): the original comment said "nan vector" but zeros
                # are appended; 128 presumably matches the RNN hidden size — confirm.
                rnn_limit_rings_of_best_estimation_center.append(np.full((128,), 0))
            else:
                rnn_limit_rings_of_best_estimation_center.append(np.mean(rnn_limit_rings_of_best_estimation[i], axis=(0,1)))
        rnn_limit_rings_of_best_estimation_center = np.array(rnn_limit_rings_of_best_estimation_center)
        print("rnn_limit_rings_of_best_estimation_center.shape: ", rnn_limit_rings_of_best_estimation_center.shape)

        file_name = "obs_data_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        obs_file = np.load("./logs/" + file_name)
        diverse_set_trajectoies = obs_file["diverse_set_trajectoies"]
        print("diverse_set_trajectoies.shape: ", diverse_set_trajectoies.shape)

        return rnn_limit_rings_of_best_estimation_center, diverse_set_trajectoies
    
    # Each entry: [nn_type, seq_len, redundancy, diverse_set_capacity].
    configs = [

        # [nn_type, 6, 1, 100],
        # [nn_type, 7, 1, 100],
        # [nn_type, 8, 1, 100],
        # [nn_type, 9, 1, 100],
        # [nn_type, 10, 1, 100],
        # [nn_type, 11, 1, 100],
        # [nn_type, 12, 1, 100],
        # [nn_type, 13, 1, 100],
        # [nn_type, 14, 1, 100],
        # [nn_type, 15, 1, 100],

        [nn_type, 5, 1, 400],
        [nn_type, 6, 1, 400],
        [nn_type, 7, 1, 400],
        [nn_type, 8, 1, 400],
        [nn_type, 9, 1, 400],
        [nn_type, 10, 1, 400],
        [nn_type, 11, 1, 400],
        [nn_type, 12, 1, 400],
        [nn_type, 13, 1, 400],
        [nn_type, 14, 1, 400],
        
        ]

    # Per-config ring-centre matrices, plus one ring-length label per centre row.
    rnn_limit_rings_of_best_estimation_centers = []
    # Copy of the config-index-2 centres (NOTE(review): currently unused).
    rnn_limit_rings_of_best_estimation_centers_0 = []
    ring_lengths = []
    diverse_set_trajectoies = []
    for i in range(len(configs)):
        _rnn_limit_rings_of_best_estimation_centers, dts = load_data_and_compute(configs[i][0], configs[i][1], configs[i][2], configs[i][3])
        rnn_limit_rings_of_best_estimation_centers.append(_rnn_limit_rings_of_best_estimation_centers)
        if i == 2:
            rnn_limit_rings_of_best_estimation_centers_0 = _rnn_limit_rings_of_best_estimation_centers.copy()
        diverse_set_trajectoies.append(dts)
        # Label every centre row of this config with its ring length (seq_len).
        ring_length = configs[i][1]
        data_cap = _rnn_limit_rings_of_best_estimation_centers.shape[0]
        ring_length_ = np.array([ring_length for i in range(data_cap)])
        ring_lengths.append(ring_length_)

    # Flatten the per-config labels into one vector aligned with the
    # concatenated centre matrix built below.
    ring_lengths = np.array(ring_lengths)
    ring_lengths = ring_lengths.reshape(ring_lengths.shape[0]*ring_lengths.shape[1])
    print(ring_lengths)

    # Pad all trajectories to 15 steps, then stack.
    # NOTE(review): this stacked matrix is not used further in this function.
    diverse_set_trajectoies_mat = []
    for i in range(len(diverse_set_trajectoies)):
        length_aligned_trj = preprocess(diverse_set_trajectoies[i], 15)
        diverse_set_trajectoies_mat.append(length_aligned_trj)
        # print("diverse_set_trajectoies_mat[i].shape: ", diverse_set_trajectoies_mat[i].shape)
    diverse_set_trajectoies_mat = np.concatenate(diverse_set_trajectoies_mat, axis=0)
    print("diverse_set_trajectoies_mat.shape: ", diverse_set_trajectoies_mat.shape)


    # Concatenate all per-config centre matrices into one big matrix.
    rnn_limit_rings_of_best_estimation_center_mat = np.concatenate(rnn_limit_rings_of_best_estimation_centers, axis=0)
    print("rnn_limit_rings_of_best_estimation_center_mat.shape: ", rnn_limit_rings_of_best_estimation_center_mat.shape)

    # PCA on the concatenated ring centres
    pca = PCA()
    pca.fit(rnn_limit_rings_of_best_estimation_center_mat)
    rnn_limit_rings_of_best_estimation_center_mat_pca = pca.transform(rnn_limit_rings_of_best_estimation_center_mat)
    print("rnn_limit_rings_of_best_estimation_center_mat_pca.shape: ", rnn_limit_rings_of_best_estimation_center_mat_pca.shape)

    # Angle map: angle of each point about the origin in the first two PCA dims.
    rnn_limit_rings_of_best_estimation_center_mat_pca_2d = rnn_limit_rings_of_best_estimation_center_mat_pca[:, :2]
    rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles = []
    for i in range(rnn_limit_rings_of_best_estimation_center_mat_pca_2d.shape[0]):
        rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles.append(np.arctan2(rnn_limit_rings_of_best_estimation_center_mat_pca_2d[i,1], rnn_limit_rings_of_best_estimation_center_mat_pca_2d[i,0]))
    rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles = np.array(rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles)
    # Shift the angles so the minimum is 0 (intended to map them into [0, 2*pi]).
    rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles = rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles - rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles.min()

    # rnn_limit_rings_of_best_estimation_center_mat_pca = pca0.transform(rnn_limit_rings_of_best_estimation_center_mat)

    # One bucket per seq_len value (1..15), filled once per angular window below.
    centers_all = [[] for i in range(15)]


    # Reproducible list of 15 random RGB colours, one per possible ring length.
    # The seed is fixed at 100 for repeatability.
    # NOTE(review): only used by the commented-out colouring code below.
    colors = []
    np.random.seed(100)
    for i in range(15):
        colors.append(np.random.rand(3))
    
    sectors = 16
    # Sweep a window of half-width pi/sectors over 0..359 degrees.
    for random_angle in range(0,360):

        progress_bar(random_angle, 360)

        random_angle *= (np.pi / 180)
        sector_angle = np.pi / sectors
        # Select all points whose angle lies within ±sector_angle of the window centre.
        idx = np.where((rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles >= random_angle - sector_angle) & (rnn_limit_rings_of_best_estimation_center_mat_pca_3d_angles <= random_angle + sector_angle))
        rnn_limit_rings_of_best_estimation_center_mat_selected = rnn_limit_rings_of_best_estimation_center_mat_pca[idx]
        seq_len_selected = ring_lengths[idx]

        # # Assign a colour to every selected point (by its ring length):
        # rnn_limit_rings_of_best_estimation_center_mat_pca_colors = []
        # for i in range(rnn_limit_rings_of_best_estimation_center_mat_selected.shape[0]):
        #     rnn_limit_rings_of_best_estimation_center_mat_pca_colors.append(colors[seq_len_selected[i]-1])

        # Mean of the selected points sharing each seq_len value (1..15).
        # NOTE(review): an empty selection for some seq_len yields a nan mean.
        rnn_limit_rings_of_best_estimation_center_mat_selected_centers = []
        for i in range(1, 16):
            idx = np.where(seq_len_selected == i)
            rnn_limit_rings_of_best_estimation_center_mat_selected_centers.append(np.mean(rnn_limit_rings_of_best_estimation_center_mat_selected[idx], axis=0))
            
        rnn_limit_rings_of_best_estimation_center_mat_selected_centers = np.array(rnn_limit_rings_of_best_estimation_center_mat_selected_centers)

        # Keep only the first three PCA dimensions for the 3-D points.
        vdim0 = 0
        vdim1 = 1
        vdim2 = 2

        for i in range(rnn_limit_rings_of_best_estimation_center_mat_selected_centers.shape[0]):
            point_3d = np.array(
                [
                    rnn_limit_rings_of_best_estimation_center_mat_selected_centers[i,vdim0], 
                    rnn_limit_rings_of_best_estimation_center_mat_selected_centers[i,vdim1], 
                    rnn_limit_rings_of_best_estimation_center_mat_selected_centers[i,vdim2]
                ]
            )
            centers_all[i].append(point_3d)

    centers_all = np.array(centers_all)
    print("centers_all.shape: ", centers_all.shape)

    # Deep copies so callers cannot mutate internal state.
    # NOTE(review): `copy` is provided by one of the star imports — confirm.
    return copy.deepcopy(centers_all), copy.deepcopy(configs), copy.deepcopy(rnn_limit_rings_of_best_estimation_center_mat)


if __name__ == "__main__":

    # Parse the replay CLI arguments. NOTE(review): both compute_* calls below
    # re-parse the same argv themselves; `args` here is only used for the plot
    # title further down.
    rpl_config = ReplayConfig()
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Build the two manifolds: trajectory ("ridge image") and policy ring.
    ridge_images_pca_grouped, configs, ridge_images = compute_trajectory_manifold()

    centers_all, configs1, rnn_limit_rings_of_best_estimation_center_mat = compute_policyring_manifold()

    print("shape of ridge_images: ", ridge_images.shape)
    print("shape of rnn_limit_rings_of_best_estimation_center_mat: ", rnn_limit_rings_of_best_estimation_center_mat.shape)

    # Add small uniform noise in [-0.001, 0.001] to ridge_images.
    # NOTE(review): the original comment claimed [0.1, 0.2]; presumably the
    # noise avoids rank-deficient inputs to the CCA below — confirm intent.
    noise = np.random.uniform(-0.001, 0.001, size=ridge_images.shape)
    ridge_images = ridge_images + noise
    # Same uniform noise in [-0.001, 0.001] for the ring-centre matrix.
    noise = np.random.uniform(-0.001, 0.001, size=rnn_limit_rings_of_best_estimation_center_mat.shape)
    rnn_limit_rings_of_best_estimation_center_mat = rnn_limit_rings_of_best_estimation_center_mat + noise

    train_view_1 = rnn_limit_rings_of_best_estimation_center_mat
    train_view_2 = ridge_images

    print("shape of train_view_1: ", train_view_1.shape)
    print("shape of train_view_2: ", train_view_2.shape)

    # PCA each view before CCA (one PCA object, re-fit per view).
    pca = PCA()
    pca.fit(train_view_1)
    train_view_1 = pca.transform(train_view_1)
    pca.fit(train_view_2)
    train_view_2 = pca.transform(train_view_2)

    # using CCA from article 'Preserved neural dynamics across animals performing similar behaviour'
    A, B, r, U, V = canoncorr(train_view_1, train_view_2, fullReturn=True)
    print(A.shape, B.shape, r.shape, U.shape, V.shape)

    # Top-10 canonical correlations.
    scores = r[:10]

    # Lollipop plot of the canonical-correlation scores.
    plot_lollipop(scores, title="Canonical Correlation Analysis (CCA) of Ridge Representation and Policy Ring Manifolds "+' ('+args.nn_type+')', xlabel="Canonical mode", ylabel="Correlation", figsize=(12, 6))

    RI_ccs = B.T
    print("RI_ccs.shape: ", RI_ccs.shape)

    # Determinants as a quick invertibility / degeneracy diagnostic.
    det = np.linalg.det(A)
    print("Determinant of A: ", det)
    det = np.linalg.det(B.T@B)
    print("Determinant of B: ", det)

    # Each row of RI_ccs is one canonical mode of the ridge-image view
    # (described in the original as 128x441; the actual shape depends on the
    # data). Plot every mode as a curve in a single figure.
    for i in range(RI_ccs.shape[0]):
        plt.plot(RI_ccs[i], label="mode "+str(i))
    plt.show()

    # Dot products between 100 random pairs of modes (orthogonality spot check).
    # NOTE(review): indices are drawn from [0, 128) — assumes at least 128 modes.
    for i in range(100):
        idx1 = np.random.randint(0, 128)
        idx2 = np.random.randint(0, 128)
        dot_product = np.dot(RI_ccs[idx1], RI_ccs[idx2])
        print(f"mode {idx1} and mode {idx2} dot product: {dot_product}")


    # For the first two triples of canonical modes, draw U and V in two
    # side-by-side 3-D scatter plots whose rotations are synchronised by a
    # mouse-motion handler.
    for i in range(2):

        cm1, cm2, cm3 = i * 3, i * 3 + 1, i * 3 + 2

        fig = plt.figure()
        ax1 = fig.add_subplot(121, projection='3d')
        ax2 = fig.add_subplot(122, projection='3d')

        # Mouse handler: mirror the viewpoint of the panel being dragged.
        def on_mouse_event(event):
            if event.inaxes == ax1:
                # Event in ax1: copy its viewpoint to ax2.
                ax2.view_init(elev=ax1.elev, azim=ax1.azim)
            elif event.inaxes == ax2:
                # Event in ax2: copy its viewpoint to ax1.
                ax1.view_init(elev=ax2.elev, azim=ax2.azim)
            fig.canvas.draw()
        # Register the mouse handler.
        fig.canvas.mpl_connect('motion_notify_event', on_mouse_event)

        ax1.scatter(U[:, cm1], U[:, cm2], U[:, cm3], c='r', marker='o')
        ax2.scatter(V[:, cm1], V[:, cm2], V[:, cm3], c='b', marker='o')
        # Panel titles: left = neural dynamics, right = solution structure.
        ax1.set_title('Neural Dynamics')
        ax2.set_title('Solution Structure')

        ax1.set_xlabel(f'Canonical mode {cm1}')
        ax1.set_ylabel(f'Canonical mode {cm2}')
        ax1.set_zlabel(f'Canonical mode {cm3}')
        ax2.set_xlabel(f'Canonical mode {cm1}')
        ax2.set_ylabel(f'Canonical mode {cm2}')
        ax2.set_zlabel(f'Canonical mode {cm3}')

        # Start both panels from a top-down view.
        ax1.view_init(elev=90, azim=0)
        ax2.view_init(elev=90, azim=0)

        plt.show()

    # Reproducible list of 15 random RGB colours (seed fixed at 100), used to
    # colour the ten per-config groups below.
    colors = []
    np.random.seed(100)
    for i in range(15):
        colors.append(np.random.rand(3))

    # Same paired, rotation-synchronised 3-D plots again, but with the points
    # coloured by config group (ten equal slices along the sample axis).
    for i in range(2):

        cm1, cm2, cm3 = i * 3, i * 3 + 1, i * 3 + 2

        fig = plt.figure()
        ax1 = fig.add_subplot(121, projection='3d')
        ax2 = fig.add_subplot(122, projection='3d')

        # Mouse handler: mirror the viewpoint of the panel being dragged.
        def on_mouse_event(event):
            if event.inaxes == ax1:
                # Event in ax1: copy its viewpoint to ax2.
                ax2.view_init(elev=ax1.elev, azim=ax1.azim)
            elif event.inaxes == ax2:
                # Event in ax2: copy its viewpoint to ax1.
                ax1.view_init(elev=ax2.elev, azim=ax2.azim)
            fig.canvas.draw()
        # Register the mouse handler.
        fig.canvas.mpl_connect('motion_notify_event', on_mouse_event)

        for j in range(10):
            start = j * (U.shape[0]//10)
            end = (j+1) * (U.shape[0]//10)
            ax1.scatter(U[start:end, cm1], U[start:end, cm2], U[start:end, cm3], c=colors[j], marker='o')
            ax2.scatter(V[start:end, cm1], V[start:end, cm2], V[start:end, cm3], c=colors[j], marker='o')

        # Panel titles: left = neural dynamics, right = solution structure.
        ax1.set_title('Neural Dynamics')
        ax2.set_title('Solution Structure')

        ax1.set_xlabel(f'Canonical mode {cm1}')
        ax1.set_ylabel(f'Canonical mode {cm2}')
        ax1.set_zlabel(f'Canonical mode {cm3}')
        ax2.set_xlabel(f'Canonical mode {cm1}')
        ax2.set_ylabel(f'Canonical mode {cm2}')
        ax2.set_zlabel(f'Canonical mode {cm3}')

        # Start both panels from a top-down view.
        ax1.view_init(elev=90, azim=0)
        ax2.view_init(elev=90, azim=0)

        plt.show()

    # def del_diag(matrix):
    #     new_matrix = np.zeros((matrix.shape[0], matrix.shape[1] - 1))
    #     for i in range(matrix.shape[0]):
    #         for j in range(matrix.shape[1]):
    #             if i != j:
    #                 new_matrix[i, j if j < i else j - 1] = matrix[i, j]
    #     return new_matrix
    
    # U = U[:, :6]
    # V = V[:, :6]

    # U_dist_mat = squareform(pdist(U, 'euclidean'))
    # V_dist_mat = squareform(pdist(V, 'euclidean'))
    # U_dist_mat = del_diag(U_dist_mat)
    # V_dist_mat = del_diag(V_dist_mat)

    # print("U_dist_mat.shape: ", U_dist_mat.shape)
    # print("V_dist_mat.shape: ", V_dist_mat.shape)

    # corr, _  = pearsonr(U_dist_mat.flatten(), V_dist_mat.flatten())
    # print("correlation between U and V distance matrix: ", corr)