from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Render an in-place textual progress bar on stdout.

    Args:
        current: number of completed steps.
        total: total number of steps; 0 is treated as fully complete
            instead of raising ZeroDivisionError.
        barLength: width of the bar body in characters.
    """
    # Guard against division by zero; clamp percent to [0, 100] so the
    # bar never overflows its brackets when current > total.
    percent = float(current) * 100 / total if total else 100.0
    percent = max(0.0, min(100.0, percent))
    # Clamp the dash count to >= 0: the original `int(... - 1)` went
    # negative at low percentages, collapsing the arrow shaft entirely.
    filled = max(int(percent / 100 * barLength) - 1, 0)
    arrow = '-' * filled + '>'
    spaces = ' ' * (barLength - len(arrow))

    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Jitted forward pass of *model*.

    `model` is marked static so jit specializes (and retraces) per
    model instance rather than attempting to trace it as data.
    """
    outputs = model.apply(variables, state, x)
    return outputs

@jit
def get_action(y):
    """Greedy action selection: index of the largest value in *y*."""
    best_idx = jnp.argmax(y)
    return best_idx
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    # open json file
    with open(pth, "r") as f:
        data = json.load(f)
        landscape = data["data"]
        state = data["state"]
        goal = data["goal"]
        if display:
            print("state: ", state)
            print("goal: ", goal)
            print("landscape: ", landscape)
    return landscape, state, goal


def main():
    """Analyze saved RNN limit-ring and observation data.

    Loads npz archives produced by earlier runs, projects ring centers
    with PCA, then compares the self-similarity matrix of each
    observation sequence against that of the corresponding policy-ring
    centers (and against a random baseline) using MSE, Shen similarity
    and Pearson correlation, visualizing each stage with matplotlib.
    """

    seq_len = 15
    redundancy = 5
    diverse_set_capacity = 5

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Write the parsed CLI values back into the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # NOTE(review): nn_type stays '' for any value other than "vanilla"/"gru",
    # which would yield file names below without a network-type tag — confirm intended.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    def load_data_and_compute(nn_type, seq_len, redundancy, diverse_set_capacity):
        """Load the limit-ring and observation npz archives for one config.

        Returns (ring-center array, deep copy of the raw ring matrices,
        deep copy of the observation sequences).
        """

        rnn_limit_rings_file_name = "./logs/rnn_limit_rings_of_best_estimation_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        # Load the npz archive
        rnn_limit_rings_of_best_estimation_file = np.load(rnn_limit_rings_file_name)

        # Names of all arrays stored in the npz file
        matrix_names = rnn_limit_rings_of_best_estimation_file.files

        rnn_limit_rings_of_best_estimation = []

        # Iterate over the names and collect each stored matrix
        for name in matrix_names:
            matrix = rnn_limit_rings_of_best_estimation_file[name]
            rnn_limit_rings_of_best_estimation.append(matrix)

        # Center-position sequence: mean over the first two axes of each ring matrix
        rnn_limit_rings_of_best_estimation_center = []
        for i in range(len(rnn_limit_rings_of_best_estimation)):
            rnn_limit_rings_of_best_estimation_center.append(np.mean(rnn_limit_rings_of_best_estimation[i], axis=(0,1)))
            # print("shape of rnn_limit_rings_of_best_estimation[i] is: ", rnn_limit_rings_of_best_estimation[i].shape)
        rnn_limit_rings_of_best_estimation_center = np.array(rnn_limit_rings_of_best_estimation_center)
        print("rnn_limit_rings_of_best_estimation_center.shape: ", rnn_limit_rings_of_best_estimation_center.shape)

        # load obs data
        file_name = "obs_records_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
        obs_file = np.load("./logs/" + file_name)
        # Names of all arrays stored in the npz file
        matrix_names = obs_file.files

        # Iterate over the names and collect each observation matrix
        obs_seq = []
        for name in matrix_names:
            matrix = obs_file[name]
            obs_seq.append(matrix)

        return rnn_limit_rings_of_best_estimation_center, copy.deepcopy(rnn_limit_rings_of_best_estimation), copy.deepcopy(obs_seq)
    
    # Configurations to load: [nn_type, seq_len, redundancy, diverse_set_capacity]
    configs = [

        # [nn_type, 6, 1, 100],
        [nn_type, 7, 1, 100],
        # [nn_type, 8, 1, 100],
        # [nn_type, 9, 1, 100],
        # [nn_type, 10, 1, 100],
        # [nn_type, 11, 1, 100],
        # [nn_type, 12, 1, 100],
        # [nn_type, 13, 1, 100],
        # [nn_type, 14, 1, 100],
        # [nn_type, 15, 1, 100],
        
        ]

    rnn_limit_rings_of_best_estimation_centers = []
    policy_rings_raw_data = []
    obs_seq_raw_data = []
    for i in range(len(configs)):
        centers, raw_data, obs_seq = load_data_and_compute(configs[i][0], configs[i][1], configs[i][2], configs[i][3])
        rnn_limit_rings_of_best_estimation_centers.append(centers)
        policy_rings_raw_data.append(raw_data)
        obs_seq_raw_data.append(obs_seq)

    # Concatenate every per-config centers array into one matrix
    rnn_limit_rings_of_best_estimation_center_mat = np.concatenate(rnn_limit_rings_of_best_estimation_centers, axis=0)

    # Fit PCA on the concatenated centers
    pca = PCA()
    pca.fit(rnn_limit_rings_of_best_estimation_center_mat)

    # Take the first two principal components
    pca_components = pca.components_
    pca_components = pca_components[:2]
    # Visualize the two component vectors as side-by-side bar plots
    fig = plt.figure()
    ax = fig.add_subplot(121)
    ax.bar(np.arange(len(pca_components[0])), pca_components[0])
    ax1 = fig.add_subplot(122)
    ax1.bar(np.arange(len(pca_components[1])), pca_components[1])
    plt.show()


    # Project each per-config centers array with the fitted PCA
    # NOTE(review): this projection is never read afterwards — looks like dead code.
    rnn_limit_rings_of_best_estimation_centers_pca = []
    for i in range(len(rnn_limit_rings_of_best_estimation_centers)):
        rnn_limit_rings_of_best_estimation_centers_pca.append(pca.transform(rnn_limit_rings_of_best_estimation_centers[i]))

    for r in policy_rings_raw_data:
        for i in range(len(r)):
            print("shape of r[i] is: ", r[i].shape)
    
    for r in obs_seq_raw_data:
        for i in range(len(r)):
            print("shape of obs[i] is: ", r[i].shape)

    # Pick one policy ring at random for the detailed comparison below
    rnd_idx = random.randint(0, len(policy_rings_raw_data[0])-1)
    print("rnd_idx: ", rnd_idx)
    policy_choosen = policy_rings_raw_data[0][rnd_idx]
    print("policy_choosen.shape: ", policy_choosen.shape)

    # Center sequence of the chosen policy ring (mean over axis 1)
    policy_choosen_centers = np.mean(policy_choosen, axis=(1))
    print("policy_choosen_centers.shape: ", policy_choosen_centers.shape)
    # NOTE(review): overwritten two statements below once PCA is re-fitted — dead code.
    policy_choosen_centers_pca = pca.transform(policy_choosen_centers)

    # Re-fit PCA on policy_choosen_centers alone and visualize in 3D
    # NOTE(review): this rebinds `pca`, discarding the model fitted on the
    # concatenated centers above — confirm that is intentional.
    pca = PCA()
    pca.fit(policy_choosen_centers)
    policy_choosen_centers_pca = pca.transform(policy_choosen_centers)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(policy_choosen_centers_pca[:,0], policy_choosen_centers_pca[:,1], policy_choosen_centers_pca[:,2], s=5)
    ax.scatter(np.mean(policy_choosen_centers_pca[:,0]), np.mean(policy_choosen_centers_pca[:,1]), np.mean(policy_choosen_centers_pca[:,2]), s=100, c='r')
    plt.show()

    obs_seq_choosen = obs_seq_raw_data[0][rnd_idx]

    # Merge the trailing two dims of obs_seq_choosen: (a, b, c) -> (a, b*c)
    obs_seq_choosen_reshaped = np.reshape(obs_seq_choosen, (obs_seq_choosen.shape[0], obs_seq_choosen.shape[1]*obs_seq_choosen.shape[2]))
    print("obs_seq_choosen_reshaped.shape: ", obs_seq_choosen_reshaped.shape)

    # Self-similarity (row-wise correlation) matrix of obs_seq_choosen_reshaped
    obs_seq_choosen_reshaped_self_similarity_matrix = np.corrcoef(obs_seq_choosen_reshaped)
    # Self-similarity matrix of policy_choosen_centers
    policy_choosen_centers_self_similarity_matrix = np.corrcoef(policy_choosen_centers)

    # Mean squared error between two equally-shaped matrices
    def matrix_similarity(A, B):
        mse = np.mean((A - B) ** 2)
        return mse
    
    # MSE between the two max-normalized self-similarity matrices
    obs_seq_choosen_reshaped_self_similarity_matrix_norm = obs_seq_choosen_reshaped_self_similarity_matrix / np.max(obs_seq_choosen_reshaped_self_similarity_matrix)
    policy_choosen_centers_self_similarity_matrix_norm = policy_choosen_centers_self_similarity_matrix / np.max(policy_choosen_centers_self_similarity_matrix)
    _mse = matrix_similarity(obs_seq_choosen_reshaped_self_similarity_matrix_norm, policy_choosen_centers_self_similarity_matrix_norm)
    print("mse: ", _mse)

    def shen_similarity(A, B):
        # A and B are two NxN adjacency matrices; similarity is the ratio of
        # the elementwise-min sum to the elementwise-max sum.
        min_ab = np.minimum(A, B) 
        max_ab = np.maximum(A, B)
        denom = max_ab.sum()
        numer = min_ab.sum()
        return numer/denom
    
    # Shen similarity of the two normalized self-similarity matrices
    _shen_similarity = shen_similarity(obs_seq_choosen_reshaped_self_similarity_matrix_norm, policy_choosen_centers_self_similarity_matrix_norm)
    print("shen_similarity: ", _shen_similarity)

    # Flatten both matrices to vectors and compute their Pearson correlation
    obs_seq_choosen_reshaped_self_similarity_matrix_norm_vector = obs_seq_choosen_reshaped_self_similarity_matrix_norm.reshape(-1)
    policy_choosen_centers_self_similarity_matrix_norm_vector = policy_choosen_centers_self_similarity_matrix_norm.reshape(-1)
    _pearsonr = np.corrcoef(obs_seq_choosen_reshaped_self_similarity_matrix_norm_vector, policy_choosen_centers_self_similarity_matrix_norm_vector)
    print("_pearsonr: ", _pearsonr)

    # Show both self-similarity matrices side by side
    fig = plt.figure()
    ax = fig.add_subplot(121)
    ax.imshow(obs_seq_choosen_reshaped_self_similarity_matrix)
    ax1 = fig.add_subplot(122)
    ax1.imshow(policy_choosen_centers_self_similarity_matrix)

    # Title each subplot
    ax.set_title("obs_seq_choosen_reshaped_self_similarity_matrix")
    ax1.set_title("policy_choosen_centers_self_similarity_matrix")

    plt.show()

    # Compute the similarity metrics for every sequence
    mses = []
    shen_similarities = []
    pearsonrs = []
    for id in range(len(policy_rings_raw_data[0])):
        policy_choosen = policy_rings_raw_data[0][id]
        policy_choosen_centers = np.mean(policy_choosen, axis=(1))
        obs_seq_choosen = obs_seq_raw_data[0][id]
        obs_seq_choosen_reshaped = np.reshape(obs_seq_choosen, (obs_seq_choosen.shape[0], obs_seq_choosen.shape[1]*obs_seq_choosen.shape[2]))
        # Self-similarity matrix of obs_seq_choosen_reshaped
        obs_seq_choosen_reshaped_self_similarity_matrix = np.corrcoef(obs_seq_choosen_reshaped)
        # Self-similarity matrix of policy_choosen_centers
        policy_choosen_centers_self_similarity_matrix = np.corrcoef(policy_choosen_centers)

        obs_seq_choosen_reshaped_self_similarity_matrix_norm = obs_seq_choosen_reshaped_self_similarity_matrix / np.max(obs_seq_choosen_reshaped_self_similarity_matrix)
        policy_choosen_centers_self_similarity_matrix_norm = policy_choosen_centers_self_similarity_matrix / np.max(policy_choosen_centers_self_similarity_matrix)
        obs_seq_choosen_reshaped_self_similarity_matrix_norm_vector = obs_seq_choosen_reshaped_self_similarity_matrix_norm.reshape(-1)
        policy_choosen_centers_self_similarity_matrix_norm_vector = policy_choosen_centers_self_similarity_matrix_norm.reshape(-1)

        _mse = matrix_similarity(obs_seq_choosen_reshaped_self_similarity_matrix_norm, policy_choosen_centers_self_similarity_matrix_norm)
        _shen_similarity = shen_similarity(obs_seq_choosen_reshaped_self_similarity_matrix_norm, policy_choosen_centers_self_similarity_matrix_norm)
        _pearsonr = np.corrcoef(obs_seq_choosen_reshaped_self_similarity_matrix_norm_vector, policy_choosen_centers_self_similarity_matrix_norm_vector)

        mses.append(_mse)
        shen_similarities.append(_shen_similarity)
        pearsonrs.append(_pearsonr[0,1])

    # Plot mses, shen_similarities and pearsonrs for all sequences
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(mses, label="mse")
    ax.plot(shen_similarities, label="shen_similarity")
    ax.plot(pearsonrs, label="pearsonr")
    ax.legend()
    plt.show()


    # Baseline: similarity of every obs sequence against random vectors
    mses = []
    shen_similarities = []
    pearsonrs = []
    for id in range(len(policy_rings_raw_data[0])):
        policy_choosen_centers = np.mean(policy_rings_raw_data[0][id], axis=(1))
        # Random array shaped like policy_choosen_centers, elements uniform over [-1, 1]
        policy_choosen_centers_random = np.random.uniform(-1, 1, policy_choosen_centers.shape)
        obs_seq_choosen = obs_seq_raw_data[0][id]
        obs_seq_choosen_reshaped = np.reshape(obs_seq_choosen, (obs_seq_choosen.shape[0], obs_seq_choosen.shape[1]*obs_seq_choosen.shape[2]))
        # Self-similarity matrix of obs_seq_choosen_reshaped
        obs_seq_choosen_reshaped_self_similarity_matrix = np.corrcoef(obs_seq_choosen_reshaped)
        # Self-similarity matrix of the random baseline
        policy_choosen_centers_random_self_similarity_matrix = np.corrcoef(policy_choosen_centers_random)

        obs_seq_choosen_reshaped_self_similarity_matrix_norm = obs_seq_choosen_reshaped_self_similarity_matrix / np.max(obs_seq_choosen_reshaped_self_similarity_matrix)
        policy_choosen_centers_random_self_similarity_matrix_norm = policy_choosen_centers_random_self_similarity_matrix / np.max(policy_choosen_centers_random_self_similarity_matrix)
        obs_seq_choosen_reshaped_self_similarity_matrix_norm_vector = obs_seq_choosen_reshaped_self_similarity_matrix_norm.reshape(-1)
        policy_choosen_centers_random_self_similarity_matrix_norm_vector = policy_choosen_centers_random_self_similarity_matrix_norm.reshape(-1)

        _mse = matrix_similarity(obs_seq_choosen_reshaped_self_similarity_matrix_norm, policy_choosen_centers_random_self_similarity_matrix_norm)
        _shen_similarity = shen_similarity(obs_seq_choosen_reshaped_self_similarity_matrix_norm, policy_choosen_centers_random_self_similarity_matrix_norm)
        _pearsonr = np.corrcoef(obs_seq_choosen_reshaped_self_similarity_matrix_norm_vector, policy_choosen_centers_random_self_similarity_matrix_norm_vector)

        mses.append(_mse)
        shen_similarities.append(_shen_similarity)
        pearsonrs.append(_pearsonr[0,1])

    # Plot the baseline mses, shen_similarities and pearsonrs
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(mses, label="mse")
    ax.plot(shen_similarities, label="shen_similarity")
    ax.plot(pearsonrs, label="pearsonr")
    ax.legend()
    plt.show()


# Script entry point: run the full analysis when executed directly.
if __name__ == "__main__":
    main()