from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.spatial import KDTree
from sklearn.linear_model import LinearRegression
import umap
from ripser import ripser
from persim import plot_diagrams
from scipy.spatial.distance import pdist, squareform

def progress_bar(current, total, barLength = 100):
    """Draw a single-line, carriage-return-style progress bar on stdout.

    `current`/`total` give the completed fraction; `barLength` is the bar
    width in characters.  The line is rewritten in place via ``end='\\r'``.
    """
    done_fraction = float(current) * 100 / total
    filled = '-' * int(done_fraction / 100 * barLength - 1) + '>'
    padding = ' ' * (barLength - len(filled))

    # %d-style truncation toward zero is reproduced with int()
    print(f'Progress: [{filled}{padding}] {int(done_fraction)} %', end='\r')
    sys.stdout.flush()

# NOTE(review): `partial`, `jax` and the model class come in via the star
# imports at the top of the file -- confirm against those modules.
@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """JIT-compiled forward pass of the model.

    Args:
        variables: parameter pytree passed to ``model.apply``.
        state: carry/recurrent state forwarded to the module.
        x: input batch.
        model: the module object itself; declared static (argnum 3), so
            each distinct model instance triggers a fresh trace/compile.

    Returns:
        Whatever ``model.apply(variables, state, x)`` returns --
        presumably (output, new_state) for the GRU variant; verify
        against the module definition.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest entry of `y`."""
    best_index = jnp.argmax(y)
    return best_index

# Batched variant: maps get_action over the leading axis of its input.
get_action_vmap = jax.vmap(get_action)

def load_task(pth = "./logs/task.json", display = True):
    """Load a saved task from a JSON log file.

    The file must contain the keys "data" (landscape grid), "state"
    (start state) and "goal".  When `display` is truthy the three values
    are echoed to stdout.

    Returns:
        (landscape, state, goal) tuple, exactly as stored in the file.
    """
    with open(pth, "r") as f:
        task = json.load(f)

    landscape = task["data"]
    state = task["state"]
    goal = task["goal"]

    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)

    return landscape, state, goal


def main():
    """Compute persistence diagrams of RNN limit-ring activations.

    Pipeline (all inputs are .npz logs produced by other scripts):
      1. Load per-config limit-ring activation matrices and their
         center positions from ./logs/.
      2. Fit PCA on the ring centers (variance printout is diagnostic).
      3. Flatten all ring activations, "denoise" by zeroing every PCA
         component beyond the first `dims`, then randomly sample
         `sample_n` points.
      4. Run ripser (Z/47 coefficients, up to H2) on the pairwise
         distance matrix and save the diagrams under ./logs/.

    NOTE(review): `np`, `copy`, `ReplayConfig` etc. come from the star
    imports at the top of the file -- confirm against those modules.
    """

    # number of leading principal components kept in the "denoising" step
    dims = 6

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--sample_n", type=int, default=1000)

    args = parser.parse_args()

    # write the parsed CLI values back onto the shared config object
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    sample_n = args.sample_n

    # normalize the network-type tag used to build log file names.
    # NOTE(review): any other nn_type falls through to '', producing file
    # names without a type tag -- confirm this is intended.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    def load_data_and_compute(nn_type, seq_len, redundancy, diverse_set_capacity):
        """Load the limit-ring npz log for one config.

        Returns (per-ring mean center positions as an array, deep copy of
        the raw ring matrices).  Also loads the matching obs_data npz,
        but only its shape is printed here.
        """

        rnn_limit_rings_file_name = "./logs/rnn_limit_rings_of_best_estimation_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        # load the npz file
        rnn_limit_rings_of_best_estimation_file = np.load(rnn_limit_rings_file_name)

        # names of all arrays stored in the npz file
        matrix_names = rnn_limit_rings_of_best_estimation_file.files

        rnn_limit_rings_of_best_estimation = []

        # iterate over the stored names and collect every matrix
        for name in matrix_names:
            matrix = rnn_limit_rings_of_best_estimation_file[name]
            # operate on each matrix object here if needed,
            # e.g. print its shape:
            # print(f"Matrix '{name}' shape: {matrix.shape}")
            rnn_limit_rings_of_best_estimation.append(matrix)

        # compute the sequence of center positions of the limit rings
        # (mean over the first two axes of each ring matrix)
        rnn_limit_rings_of_best_estimation_center = []
        for i in range(len(rnn_limit_rings_of_best_estimation)):
            rnn_limit_rings_of_best_estimation_center.append(np.mean(rnn_limit_rings_of_best_estimation[i], axis=(0,1)))
            # print("shape of rnn_limit_rings_of_best_estimation[i] is: ", rnn_limit_rings_of_best_estimation[i].shape)
        rnn_limit_rings_of_best_estimation_center = np.array(rnn_limit_rings_of_best_estimation_center)
        print("rnn_limit_rings_of_best_estimation_center.shape: ", rnn_limit_rings_of_best_estimation_center.shape)

        # load obs data
        file_name = "obs_data_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        obs_file = np.load("./logs/" + file_name)
        obs_data = obs_file["obs_data"]
        diverse_set_trajectoies = obs_file["diverse_set_trajectoies"]
        diverse_set_actions = obs_file["diverse_set_actions"]

        print("shape of obs_data is: ", obs_data.shape)

        return rnn_limit_rings_of_best_estimation_center, copy.deepcopy(rnn_limit_rings_of_best_estimation)
    
    # each entry: [nn_type, seq_len, redundancy, diverse_set_capacity];
    # only one config is active, the rest are kept for quick re-enabling
    configs = [

        [nn_type, 6, 1, 100],
        # [nn_type, 7, 1, 100],
        # [nn_type, 8, 1, 100],
        # [nn_type, 9, 1, 100],
        # [nn_type, 10, 1, 100],
        # [nn_type, 11, 1, 100],
        # [nn_type, 12, 1, 100],
        # [nn_type, 13, 1, 100],
        # [nn_type, 14, 1, 100],
        # [nn_type, 15, 1, 100],
        
        ]

    rnn_limit_rings_of_best_estimation_centers = []
    policy_rings_raw_data = []
    for i in range(len(configs)):
        centers, raw_data = load_data_and_compute(configs[i][0], configs[i][1], configs[i][2], configs[i][3])
        rnn_limit_rings_of_best_estimation_centers.append(centers)
        policy_rings_raw_data.append(raw_data)

    # concatenate all center matrices of rnn_limit_rings_of_best_estimation_centers
    rnn_limit_rings_of_best_estimation_center_mat = np.concatenate(rnn_limit_rings_of_best_estimation_centers, axis=0)

    # fit PCA on the concatenated ring centers
    pca = PCA()
    pca.fit(rnn_limit_rings_of_best_estimation_center_mat)

    # print the PCA explained-variance ratios
    print("pca.explained_variance_ratio_: ", pca.explained_variance_ratio_)

    # use the fitted PCA to project each config's centers, one by one
    rnn_limit_rings_of_best_estimation_centers_pca = []
    for i in range(len(rnn_limit_rings_of_best_estimation_centers)):
        rnn_limit_rings_of_best_estimation_centers_pca.append(pca.transform(rnn_limit_rings_of_best_estimation_centers[i]))

    # NOTE(review): rnd_idx is unseeded, so policy_choosen differs per
    # run; it is only printed and never used downstream -- confirm.
    rnd_idx = random.randint(0, len(policy_rings_raw_data[0])-1)
    print("rnd_idx: ", rnd_idx)
    policy_choosen = policy_rings_raw_data[0][rnd_idx]
    print("policy_choosen.shape: ", policy_choosen.shape)

    policy_rings_raw_data_pca = []
    policy_rings_raw_data_flat_contat = []
    limit_ring_centers = []
    for j in range(len(policy_rings_raw_data[0])):

        ring_centers = np.mean(policy_rings_raw_data[0][j], axis=(1))
        print("shape of ring_centers : ", ring_centers.shape)
        limit_ring_centers.append(ring_centers)

        # flatten each ring matrix to (points, features) before projecting
        policy_rings_raw_data_flat = policy_rings_raw_data[0][j].reshape(-1, policy_rings_raw_data[0][j].shape[-1])
        policy_rings_raw_data_pca.append(pca.transform(policy_rings_raw_data_flat))
        policy_rings_raw_data_flat_contat.append(policy_rings_raw_data_flat)

    policy_rings_raw_data_flat_contat = np.concatenate(policy_rings_raw_data_flat_contat, axis=0)
    print("policy_rings_raw_data_flat_contat.shape: ", policy_rings_raw_data_flat_contat.shape)

    # concatenate all per-ring center arrays of limit_ring_centers
    limit_ring_centers_mat = np.concatenate(limit_ring_centers, axis=0)
    print("limit_ring_centers_mat.shape: ", limit_ring_centers_mat.shape)

    # limit_ring_centers_pca = PCA()
    # limit_ring_centers_pca.fit(limit_ring_centers_mat)
    # limit_ring_centers_mat_pca = limit_ring_centers_pca.transform(limit_ring_centers_mat)

    # # print PCA explained-variance ratios
    # print("limit_ring_centers_pca.explained_variance_ratio_: ", limit_ring_centers_pca.explained_variance_ratio_)

    # limit_ring_centers_mat_pca[:,dims:] = 0
    # limit_ring_centers_mat_clean = limit_ring_centers_pca.inverse_transform(limit_ring_centers_mat_pca)

    # dist_matrix = squareform(pdist(limit_ring_centers_mat_clean))

    # # compute the persistence diagram
    # # dgms = ripser(data, maxdim=2, coeff=47)['dgms']
    # dgms = ripser(dist_matrix, maxdim=1, coeff=47,do_cocycles= True, distance_matrix=True)['dgms']
    # plot_diagrams(dgms, show=True, lifetime=True)

    # # UMAP-reduce limit_ring_centers_mat_clean
    # reducer = umap.UMAP(n_components=3, n_neighbors=20, min_dist=0.1, init='spectral')
    # embedding = reducer.fit_transform(limit_ring_centers_mat_clean)

    # # visualize the embedding in 3D
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # ax.scatter(embedding[:, 0], embedding[:, 1], embedding[:, 2], c='b', s=1)
    # plt.show()

    # # flatten policy_rings_raw_data_pca into a 2-D array
    # policy_rings_raw_data_pca_arr = policy_rings_raw_data_pca[0]
    # for i in range(1, len(policy_rings_raw_data_pca)):
    #     policy_rings_raw_data_pca_arr = np.concatenate((policy_rings_raw_data_pca_arr, policy_rings_raw_data_pca[i]), axis=0)

    # PCA on policy_rings_raw_data_flat_contat: keep only the first
    # `dims` principal components, then map back to the original space
    pca = PCA()
    pca.fit(policy_rings_raw_data_flat_contat)
    # print the variance ratios of the first `dims` components
    print("pca.explained_variance_ratio_[:dims]: ", pca.explained_variance_ratio_[:dims])
    policy_rings_raw_data_flat_contat_pca = pca.transform(policy_rings_raw_data_flat_contat)
    policy_rings_raw_data_flat_contat_pca[:,dims:] = 0
    policy_rings_raw_data_flat_contat_clean = pca.inverse_transform(policy_rings_raw_data_flat_contat_pca)

    # randomly sample sample_n points (without replacement) from the
    # denoised data
    idx = np.random.choice(policy_rings_raw_data_flat_contat_clean.shape[0], sample_n, replace=False)
    policy_rings_raw_data_flat_contat_clean_choosen = policy_rings_raw_data_flat_contat_clean[idx]

    # # PCA on policy_rings_raw_data_flat_contat_clean_choosen, then visualize
    # pca = PCA()
    # pca.fit(policy_rings_raw_data_flat_contat_clean_choosen)
    # policy_rings_raw_data_flat_contat_clean_choosen_pca = pca.transform(policy_rings_raw_data_flat_contat_clean_choosen)

    # # visualize policy_rings_raw_data_flat_contat_clean_choosen_pca in 3D
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # ax.scatter(policy_rings_raw_data_flat_contat_clean_choosen_pca[:,0], policy_rings_raw_data_flat_contat_clean_choosen_pca[:,1], policy_rings_raw_data_flat_contat_clean_choosen_pca[:,2], c='r', s=5)
    # plt.show()

    dist_matrix = squareform(pdist(policy_rings_raw_data_flat_contat_clean_choosen))

    print("dist_matrix.shape: ", dist_matrix.shape)

    # compute the persistence diagram (homology up to H2, Z/47 coefficients)
    # dgms = ripser(dist_matrix, maxdim=2, coeff=47)['dgms']
    dgms = ripser(dist_matrix, maxdim=2, coeff=47,do_cocycles= True, distance_matrix=True)['dgms']
    # plot_diagrams(dgms, show=True, lifetime=True)

    # save the diagrams into a single npz file
    data = {}
    for i, matrix in enumerate(dgms):
        # store each diagram matrix under its own key
        data[f'dgms_{i}'] = matrix
    
    file_name = "./logs/persistence_diagram_" + rpl_config.nn_type + "_" + str(rpl_config.nn_size) + "_" + str(sample_n) + ".npz"
    np.savez(file_name, **data)

    # # visualize policy_rings_raw_data_flat_contat_pca in 3D
    # fig = plt.figure()
    # ax = fig.add_subplot(111, projection='3d')
    # ax.scatter(policy_rings_raw_data_flat_contat_pca[:,0], policy_rings_raw_data_flat_contat_pca[:,1], policy_rings_raw_data_flat_contat_pca[:,2], c='r', s=5)
    # plt.show()

    # # Perform UMAP dimensionality reduction
    # reducer = umap.UMAP(n_components=3, n_neighbors=20, min_dist=0.1, init='spectral')
    # embedding = reducer.fit_transform(policy_rings_raw_data_flat_contat_clean)

    # print("embedding.shape: ", embedding.shape)

    # # randomly sample 1000 points from the embedding
    # idx = np.random.choice(embedding.shape[0], sample_n, replace=False)
    # embedding = embedding[idx]

    # # # Visualize the embedding in 3D space
    # # fig = plt.figure()
    # # ax = fig.add_subplot(111, projection='3d')
    # # ax.scatter(embedding[:, 0], embedding[:, 1], embedding[:, 2], c='b', s=1)
    # # plt.show()

    # dist_matrix = squareform(pdist(embedding))
    # print("dist_matrix.shape: ", dist_matrix.shape)

    # # compute the persistence diagram
    # # dgms = ripser(data, maxdim=2, coeff=47)['dgms']
    # dgms = ripser(dist_matrix, maxdim=2, coeff=47, distance_matrix=True)['dgms']
    # plot_diagrams(dgms, show=True, lifetime=True)


# Script entry point: run the full persistence-diagram pipeline.
if __name__ == "__main__":
    main()