from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
from scipy.spatial import KDTree
from sklearn.linear_model import LinearRegression
from scipy.stats import norm

# analysis of phase space

def progress_bar(current, total, barLength = 100):
    """Draw a one-line, in-place textual progress bar on stdout.

    Args:
        current: Number of steps completed so far.
        total: Total number of steps (must be non-zero).
        barLength: Width of the bar body in characters.
    """
    percent = 100.0 * current / total
    # Arrow body plus a '>' head; padding fills the bar to barLength.
    filled = '-' * int(percent / 100 * barLength - 1) + '>'
    padding = ' ' * (barLength - len(filled))
    # '\r' rewinds the cursor so successive calls overwrite the same line.
    print(f'Progress: [{filled}{padding}] {int(percent)} %', end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Jitted single forward pass of the model.

    Args:
        variables: model parameters passed through to ``model.apply``.
        state: recurrent hidden state fed alongside the input.
        x: observation/input vector for this step.
        model: the network object; marked static for jit (``static_argnums=(3,)``)
            so each distinct model triggers its own compilation.

    Returns:
        Whatever ``model.apply(variables, state, x)`` returns; the call site in
        ``ivf`` unpacks it as a ``(new_state, output)`` pair.
    """
    return model.apply(variables, state, x)

def _argmax_action(y):
    """Greedy policy: index of the maximum entry of ``y``."""
    return jnp.argmax(y)

# Jitted single-sample version plus a batched (vmapped) variant that share
# one implementation.
get_action = jit(_argmax_action)
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Load a task description (landscape, start state, goal) from JSON.

    Args:
        pth: Path to the task file; must contain the keys "data", "state"
            and "goal".
        display: When True, echo the loaded fields to stdout.

    Returns:
        A ``(landscape, state, goal)`` tuple exactly as stored in the file.
    """
    with open(pth, "r") as task_file:
        task = json.load(task_file)
    landscape, state, goal = task["data"], task["state"], task["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal

# Global state shared with the image-display thread.
class imgview:
    """Mutable state shared between the worker and the display thread."""

    global_image = None   # latest rendered frame (numpy image), or None if not yet produced
    imgview_exit = False  # set to True to make the display loop terminate
    trajectory = []       # sequence of grid states; each element is indexable as (x, y)
    focus_i = 0           # user-controlled focus index, changed with the 'a'/'d' keys
    traj_i = 0            # index into `trajectory` of the state currently drawn

# Singleton instance used by show_image() and the producing code.
imgview_data = imgview()

# Function that displays images from a separate thread.
def show_image():
    """Image-display loop intended to run in its own thread.

    Polls ``imgview_data.global_image``; once a frame exists, draws a red
    marker at the current trajectory state and shows it in an OpenCV window.
    Pressing 'a'/'d' decrements/increments ``imgview_data.focus_i``.  The
    loop ends when ``imgview_data.imgview_exit`` is set to True.
    """
    grid_size_display = 20
    half_cell = int(grid_size_display / 2)
    while not imgview_data.imgview_exit:
        if imgview_data.global_image is None:
            # No frame produced yet -- sleep 100 ms and poll again.
            time.sleep(0.1)
            continue
        frame = np.copy(imgview_data.global_image)
        point = imgview_data.trajectory[imgview_data.traj_i]
        state_x, state_y = point[0], point[1]
        # Note the (col, row) ordering expected by cv2: y maps to the
        # horizontal pixel coordinate, x to the vertical one.
        center = (state_y * grid_size_display + half_cell,
                  state_x * grid_size_display + half_cell)
        cv2.circle(frame, center, 7, (0, 0, 255), -1, cv2.LINE_AA)
        cv2.imshow("Image", frame)
        key = cv2.waitKey(1)
        if key == ord('a'):
            imgview_data.focus_i -= 1
            print("imgview_data.focus_i: ", imgview_data.focus_i)
        elif key == ord('d'):
            imgview_data.focus_i += 1
            print("imgview_data.focus_i: ", imgview_data.focus_i)

def ivf():

    """Intrinsic-vector-field analysis of the agent's recurrent phase space.

    Pipeline:
      1. Load logged RNN hidden-state "limit ring" collections from npz
         files, pool them, fit a PCA basis and pick a random subset of states.
      2. Step the model once from each chosen state under a zero observation;
         the resulting hidden-state displacement is the local "intrinsic"
         flow vector.  Slow points (small local average flow) are detected
         and the flow field is plotted in PC1-3 space.
      3. Compare the alignment (cosine) of the flow vectors with PC1/PC2/PC3
         against random-vector baselines via histograms.

    Side effects: parses CLI args, reads model weights and npz logs from
    disk, opens matplotlib windows.  Returns nothing.
    """
    rpl_config = ReplayConfig()

    # CLI arguments default to the values stored in ReplayConfig.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Copy the parsed values back into the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # NOTE(review): identity mapping for the two supported values; nn_type
    # stays '' for any other rpl_config.nn_type (the npz file name below
    # would then not match a real collection).
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    """ 1.
    """
    # --- stage 1: load recorded hidden states and fit PCA -------------------

    def load_data(nn_type, seq_len, redundancy, diverse_set_capacity):
        """Load one npz collection of recorded hidden-state rings.

        The four arguments are encoded into the file name.  Returns a numpy
        array stacking every matrix stored in the npz file (the call site
        below assumes the stacked result is 5-D).
        """

        rnn_limit_rings_file_name = "./logs/rnn_limit_ring_collection_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"

        # Load the npz file.
        rnn_limit_rings_file = np.load(rnn_limit_rings_file_name)

        # Names of all arrays stored in the npz file.
        matrix_names = rnn_limit_rings_file.files

        rnn_limit_rings = []

        # Collect every stored matrix.
        for name in matrix_names:
            matrix = rnn_limit_rings_file[name]
            rnn_limit_rings.append(matrix)
            # print("shape of matrix: ", np.shape(matrix))

        rnn_limit_rings = np.array(rnn_limit_rings)
        return rnn_limit_rings

    # One entry per collection: (nn_type, seq_len, redundancy,
    # diverse_set_capacity).  Longer sequence lengths are kept commented out
    # for sweep runs.
    configs = [

        [nn_type, 6, 1, 100],
        # [nn_type, 7, 1, 100],
        # [nn_type, 8, 1, 100],
        # [nn_type, 9, 1, 100],
        # [nn_type, 10, 1, 100],
        # [nn_type, 11, 1, 100],
        # [nn_type, 12, 1, 100],
        # [nn_type, 13, 1, 100],
        # [nn_type, 14, 1, 100],
        # [nn_type, 15, 1, 100],
        
        ]

    rnn_limit_rings_collection = []
    for i in range(len(configs)):
        raw_data_matrix = load_data(configs[i][0], configs[i][1], configs[i][2], configs[i][3])
        # Flatten the four leading axes into one sample axis; the last axis
        # is the hidden-state dimension.
        raw_data_linear = raw_data_matrix.reshape(raw_data_matrix.shape[0]*raw_data_matrix.shape[1]*raw_data_matrix.shape[2]*raw_data_matrix.shape[3],raw_data_matrix.shape[4])
        rnn_limit_rings_collection.append(raw_data_linear)

    # Concatenate all collections into one (n_states, hidden_dim) matrix.
    rnn_limit_rings_collection_all = np.concatenate(rnn_limit_rings_collection, axis=0)
    print("shape of rnn_limit_rings_collection_all: ", rnn_limit_rings_collection_all.shape)

    # Fit PCA on the pooled hidden states.
    pca = PCA()
    pca.fit(rnn_limit_rings_collection_all)
    # rnn_limit_rings_collection_pca = pca.transform(rnn_limit_rings_collection_all)

    # First three principal directions (rows of components_).
    PC1 = pca.components_[0]
    PC2 = pca.components_[1]
    PC3 = pca.components_[2]

    # Random subset of states to keep the per-state model stepping tractable.
    n_samples = 16000
    rnd_idx_all = np.random.choice(rnn_limit_rings_collection_all.shape[0], n_samples, replace=False)
    rnn_limit_rings_collection_chosen = rnn_limit_rings_collection_all[rnd_idx_all]

    print("shape of rnn_limit_rings_collection_chosen: ", rnn_limit_rings_collection_chosen.shape)

    rnn_limit_rings_collection_chosen_pca = pca.transform(rnn_limit_rings_collection_chosen)

    print("variance ratio of PC1: ", pca.explained_variance_ratio_[0])
    print("variance ratio of PC2: ", pca.explained_variance_ratio_[1])
    print("variance ratio of PC3: ", pca.explained_variance_ratio_[2])
    print("variance ratio of PC4: ", pca.explained_variance_ratio_[3])
    print("variance ratio of PC5: ", pca.explained_variance_ratio_[4])
    print("variance ratio of PC6: ", pca.explained_variance_ratio_[5])


    """ 2. 
    """
    # --- stage 2: intrinsic flow field under a zero observation -------------

    # All-zero observation (10-dim); presumably matches the environment's
    # observation size -- TODO confirm against the env definition.
    obs_zero = jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

    intr_field = []

    for t in range(rnn_limit_rings_collection_chosen.shape[0]):

        progress_bar(t, rnn_limit_rings_collection_chosen.shape[0])

        """ model forward 
        """
        # One model step from the recorded state; the hidden-state
        # displacement is the local "intrinsic" flow vector at that state.
        rnn_state_infer, y1 = model_forward(params, rnn_limit_rings_collection_chosen[t], obs_zero, model)
        intr_field.append(rnn_state_infer - rnn_limit_rings_collection_chosen[t])

    # Stack the flow vectors into an (n_samples, hidden_dim) array.
    intr_field_np = np.array(intr_field)
    print("shape of intr_field_np: ", intr_field_np.shape)

    # NOTE(review): defined but never called inside this function; kept for
    # reference.
    def estimate_tangent_vector(estimation_centers, rnd_idx):
        """Estimates the tangent vector of a point on a high-dimensional ring.

        Args:
            estimation_centers: A (100, 128) numpy array of points on the ring.
            rnd_idx: A random index into the estimation_centers array.

        Returns:
            A (128,) numpy array representing the estimated tangent vector.
        """

        # Build a KDTree for efficient nearest neighbor search.
        tree = KDTree(estimation_centers)

        # Find the k nearest neighbors of the point with index rnd_idx.
        k = int(estimation_centers.shape[0]/8)
        _, neighbors = tree.query(estimation_centers[rnd_idx], k=k)

        # Find the pair of neighbors that are farthest apart.
        # (O(k^2) scan; each unordered pair is visited twice, which is
        # harmless for the maximum.)
        max_distance = 0
        farthest_pair = None
        for i in range(k):
            for j in range(k):
                distance = np.linalg.norm(estimation_centers[neighbors[i]] - estimation_centers[neighbors[j]])
                if distance > max_distance:
                    max_distance = distance
                    farthest_pair = (neighbors[i], neighbors[j])

        # Estimate the tangent vector as the vector between the two farthest neighbors.
        tangent_vector = estimation_centers[farthest_pair[1]] - estimation_centers[farthest_pair[0]]
        
        # Normalize the tangent vector.
        tangent_vector =  tangent_vector/np.linalg.norm(tangent_vector)

        return tangent_vector

    # # Histogram of the norm distribution of intr_field_np (disabled).
    # plt.figure(figsize=(10, 10))
    # plt.hist(np.linalg.norm(intr_field_np, axis=1), bins=100)
    # plt.show()

    # Local average flow magnitude around each flow vector.
    # NOTE(review): the neighborhood here is computed between *flow vectors*
    # (rows of intr_field_np), not between the states themselves -- confirm
    # this is intended.
    radius = 1
    keep_ratio = 0.01

    local_average_norm = []
    for i in range(intr_field_np.shape[0]):
        progress_bar(i, intr_field_np.shape[0])
        distances = np.linalg.norm(intr_field_np - intr_field_np[i], axis=1)
        within_radius_indices = np.where(distances <= radius)[0]
        local_average_norm.append(np.mean(np.linalg.norm(intr_field_np[within_radius_indices], axis=1)))

    # Keep the indices with the smallest `keep_ratio` fraction of local
    # average norm (here 1%) -- these are the "slow points".
    indices = np.argsort(local_average_norm)
    indices = indices[0:int(len(indices)*keep_ratio)]
    print("shape of indices: ", indices.shape)

    rnn_limit_rings_collection_chosen_slow_points_pca = rnn_limit_rings_collection_chosen_pca[indices]
    rnn_limit_rings_collection_chosen_slow_points = rnn_limit_rings_collection_chosen[indices]

    # PCA dimensions used as the three plot axes.
    view_dim0 = 0
    view_dim1 = 1
    view_dim2 = 2

    # Project the flow field into PC space.  Adding pca.mean_ first cancels
    # the mean-centering done inside transform(), so displacement vectors
    # are rotated into the PC basis without being shifted.
    intr_pca = pca.transform(intr_field_np + pca.mean_)

    # Cosine between each projected vector (restricted to the first 3 PCs)
    # and the PC3 ("z") axis; used to color the quiver arrows.
    vertical_vector = np.array([0, 0, 1])
    cos_theta = []
    for i in range(intr_pca.shape[0]):
        cos_theta_rad = np.dot(intr_pca[i,0:3], vertical_vector) / (np.linalg.norm(intr_pca[i,0:3]) * np.linalg.norm(vertical_vector))
        cos_theta.append(cos_theta_rad)

    # Map cos_theta in [-1, 1] onto the jet colormap (red-to-blue gradient).
    colors = []
    for i in range(intr_pca.shape[0]):
        color = plt.cm.jet((cos_theta[i] + 1) / 2)
        colors.append(color)
    

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    
    # 3-D quiver of the flow field, with slow points overlaid in red below.
    ax.quiver(rnn_limit_rings_collection_chosen_pca[:, view_dim0], rnn_limit_rings_collection_chosen_pca[:, view_dim1], rnn_limit_rings_collection_chosen_pca[:, view_dim2],
                intr_pca[:, view_dim0],
                intr_pca[:, view_dim1],
                intr_pca[:, view_dim2], color='none', edgecolor=colors, length=0.3, arrow_length_ratio=0.3, alpha=0.1)
    
    ax.scatter(rnn_limit_rings_collection_chosen_slow_points_pca[:, view_dim0], rnn_limit_rings_collection_chosen_slow_points_pca[:, view_dim1], rnn_limit_rings_collection_chosen_slow_points_pca[:, view_dim2], c='r', s=10)

    plt.show()

    # """ 3.
    # """
    # --- stage 3: alignment statistics against PC1/PC2/PC3 ------------------

    orthogonality_PC1 = []
    for i in range(rnn_limit_rings_collection_chosen.shape[0]):
        # Normalized inner product (cosine) between PC1 and intr_field_np[i].
        dot_product = np.dot(PC1, intr_field_np[i]) / (np.linalg.norm(PC1) * np.linalg.norm(intr_field_np[i]))
        orthogonality_PC1.append(dot_product)

    orthogonality_PC2 = []
    for i in range(rnn_limit_rings_collection_chosen.shape[0]):
        # Normalized inner product (cosine) between PC2 and intr_field_np[i].
        dot_product = np.dot(PC2, intr_field_np[i]) / (np.linalg.norm(PC2) * np.linalg.norm(intr_field_np[i]))
        orthogonality_PC2.append(dot_product)

    # Baseline: cosines between two groups of random 128-dim vectors whose
    # entries are i.i.d. uniform in [-1, 1].
    # NOTE(review): the hard-coded 128 is presumably the hidden dimension --
    # confirm it equals rpl_config.nn_size.
    n_ = int(np.sqrt(len(orthogonality_PC1)))
    random_vectors_A, random_vectors_B = np.random.uniform(-1, 1, size=(n_, 128)), np.random.uniform(-1, 1, size=(n_, 128))
    # Cosine of every cross pair between the two random groups.
    orthogonality_rnd = []
    for i in range(random_vectors_A.shape[0]):
        for j in range(random_vectors_B.shape[0]):
            v1_norm = random_vectors_A[i]/np.linalg.norm(random_vectors_A[i])
            v2_norm = random_vectors_B[j]/np.linalg.norm(random_vectors_B[j])
            orthogonality_rnd.append(np.dot(v1_norm, v2_norm))
    orthogonality_rnd = np.array(orthogonality_rnd)
    orthogonality_rnd = orthogonality_rnd.reshape(-1)

    # Histograms: measured PC1/PC2 alignment vs the random baseline.
    fig = plt.figure()
    ax = fig.add_subplot(121)
    ax.hist(orthogonality_rnd, bins=100, label="orthogonality_rnd", alpha=0.5)
    ax.hist(orthogonality_PC1, bins=100, label="orthogonality_PC1", alpha=0.5)
    ax.legend()

    ax2 = fig.add_subplot(122)
    ax2.hist(orthogonality_rnd, bins=100, label="orthogonality_rnd", alpha=0.5)
    ax2.hist(orthogonality_PC2, bins=100, label="orthogonality_PC2", alpha=0.5)
    ax2.legend()

    plt.show()

    # # Fit normal distributions to orthogonality_PC1/PC2 and orthogonality_rnd.
    # # First convert the three distributions into histogram counts.
    # orthogonality_PC1_hist, _ = np.histogram(orthogonality_PC1, bins=1000)
    # orthogonality_PC2_hist, _ = np.histogram(orthogonality_PC2, bins=1000)
    # orthogonality_rnd_hist, _ = np.histogram(orthogonality_rnd, bins=1000)
    
    # # Normalize each histogram so its mass sums to 1.
    # orthogonality_PC1_hist = orthogonality_PC1_hist / np.sum(orthogonality_PC1_hist)
    # orthogonality_PC2_hist = orthogonality_PC2_hist / np.sum(orthogonality_PC2_hist)
    # orthogonality_rnd_hist = orthogonality_rnd_hist / np.sum(orthogonality_rnd_hist)

    # # Fit the normal distributions.
    # params_PC1 = norm.fit(orthogonality_PC1)
    # params_PC2 = norm.fit(orthogonality_PC2)
    # params_rnd = norm.fit(orthogonality_rnd)

    # # Print the fitted parameters.
    # print("orthogonality_PC1 fit params: ", params_PC1)
    # print("orthogonality_PC2 fit params: ", params_PC2)
    # print("orthogonality_rnd fit params: ", params_rnd)


    # Distribution of |cos| alignment between the flow vectors and PC3
    # (the plot's "z" axis).
    parrallel_distribution = []
    for i in range(intr_field_np.shape[0]):
        # Normalized inner product (cosine) between PC3 and intr_field_np[i].
        dot_product = np.dot(PC3, intr_field_np[i]) / (np.linalg.norm(PC3) * np.linalg.norm(intr_field_np[i]))
        parrallel_distribution.append(abs(dot_product))

    random_vectors = np.random.uniform(-1, 1, size=(len(parrallel_distribution), 128))

    parrallel_distribution_rnd = []
    for i in range(random_vectors.shape[0]):
        dot_product = np.dot(PC3, random_vectors[i]) / (np.linalg.norm(PC3) * np.linalg.norm(random_vectors[i]))
        parrallel_distribution_rnd.append(abs(dot_product))

    parrallel_distribution = np.array(parrallel_distribution)
    parrallel_distribution_rnd = np.array(parrallel_distribution_rnd)
    

    # Histogram: PC3 alignment vs the random baseline.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist(parrallel_distribution, bins=100, label="parrallel_distribution", alpha=0.5)
    ax.hist(parrallel_distribution_rnd, bins=100, label="parrallel_distribution_rnd", alpha=0.5)
    ax.legend()
    plt.show()

# Script entry point: run the intrinsic-vector-field analysis.
if __name__ == "__main__":
    
    ivf()