from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans

# analysis of effective actions

def progress_bar(current, total, barLength = 100):
    """Render an in-place textual progress bar on stdout.

    Args:
        current: number of completed steps.
        total: total number of steps.
        barLength: width of the bar in characters.
    """
    percent = 100.0 * current / total
    filled = int(percent / 100 * barLength - 1)
    arrow = ('-' * filled) + '>'
    pad = ' ' * (barLength - len(arrow))
    # '\r' rewinds the cursor so each call overwrites the previous bar.
    print('Progress: [%s%s] %d %%' % (arrow, pad, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Run one jitted forward pass of `model`.

    `model` is marked static so jit specializes (and retraces) per model
    instance; `variables`, `state` and `x` are traced arguments.
    """
    outputs = model.apply(variables, state, x)
    return outputs

@jit
def get_action(y):
    """Greedy action selection: index of the largest logit in `y`."""
    best = jnp.argmax(y)
    return best

# Batched variant: maps greedy selection over the leading axis.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Load a task description from a JSON file.

    Args:
        pth: path to the task JSON file.
        display: when True, echo the loaded fields to stdout.

    Returns:
        (landscape, state, goal) exactly as stored under the "data",
        "state" and "goal" keys of the file.
    """
    with open(pth, "r") as f:
        data = json.load(f)
    landscape = data["data"]
    state = data["state"]
    goal = data["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal


def get_intrinsic_pc():

    """Probe the intrinsic (input-free) dynamics of the trained recurrent net.

    Parses command-line arguments into a ReplayConfig, loads the model
    weights, then evolves `n_samples` randomly initialized hidden states
    under an all-zero observation for `life_duration` steps.  The final
    hidden states are reduced with PCA and clustered with KMeans.

    Returns:
        pca: the fitted sklearn PCA object (all components retained).
        cluster_centers: KMeans cluster centers (4 clusters) in PCA space.
    """
    rpl_config = ReplayConfig()

    # Every CLI flag defaults to the value already stored in ReplayConfig.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--device", type=int, default=rpl_config.device)
    parser.add_argument("--probe_point", type=int, default=rpl_config.probe_point)

    args = parser.parse_args()

    # Write the parsed values back into the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    rpl_config.device = args.device
    rpl_config.probe_point = args.probe_point

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # NOTE(review): the "+ 10" presumably accounts for a 10-dim observation
    # concatenated onto the hidden state -- confirm against the agent code.
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    # Evolve many hidden states in parallel from random initializations.
    n_samples = 5000
    k1 = npr.randint(0, 1000000)
    rnn_state = model.initial_state_rnd(n_samples, k1)
    # Bookkeeping for per-step state-change statistics (this first `diff`
    # is identically zero; it is recomputed inside the loop below).
    rnn_state_old = rnn_state.copy()
    diff = jnp.abs(rnn_state - rnn_state_old)
    rnn_state_old = rnn_state.copy()
    diff_norm = jnp.linalg.norm(diff, axis=1)
    diff_norm_old = diff_norm.copy()
    norm_std = diff_norm.copy()

    rnn_state_init = rnn_state.copy()

    # All-zero observation: isolates the network's self-driven dynamics.
    obs_zero = jnp.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0] for i in range(n_samples)])

    rnn_state_trajectory = []

    for t in range(rpl_config.life_duration):

        # Snapshot the states at the probe point.
        # NOTE(review): rnn_state_init is never read after this assignment --
        # confirm whether it was meant to be returned or plotted.
        if t == rpl_config.probe_point:
            rnn_state_init = rnn_state.copy()

        progress_bar(t, rpl_config.life_duration)

        """ model forward 
        """
        rnn_state, y1 = model_forward(params, rnn_state, obs_zero, model)
        diff = jnp.abs(rnn_state - rnn_state_old)
        rnn_state_old = rnn_state.copy()
        diff_norm = jnp.linalg.norm(diff, axis=1)
        # Exponential moving average (weights 0.4/0.6) of the change in
        # per-sample step size.
        norm_std = 0.4 * jnp.abs(diff_norm - diff_norm_old) + 0.6 * norm_std
        diff_norm_old = diff_norm.copy()

        rnn_state_trajectory.append(np.array(rnn_state).copy())

    print(rnn_state.shape)
    print(norm_std.shape)
    rnn_state_np = np.array(rnn_state)

    # Run PCA on the final batch of hidden states.
    pca = PCA()
    pca.fit(rnn_state_np)

    rnn_state_np_pca = pca.transform(rnn_state_np)

    # Create a KMeans object with 4 clusters.
    kmeans = KMeans(n_clusters=4)
    # Cluster the PCA-projected states.
    kmeans.fit(rnn_state_np_pca)
    # Retrieve the cluster center coordinates.
    cluster_centers = kmeans.cluster_centers_

    return pca, cluster_centers

def main():
    """Replay one task with the trained agent and visualize its dynamics.

    Uses the PCA basis fitted on the input-free dynamics (get_intrinsic_pc)
    to plot, in 3D: the hidden-state trajectory, the observation-induced
    perturbation vectors (NEAP), and the intrinsic drift field.
    """

    intrinsic_pca, cluster_centers = get_intrinsic_pc()
    # print(intrinsic_pca.explained_variance_ratio_)

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    # NOTE(review): get_intrinsic_pc() already parsed sys.argv and accepts
    # --probe_point; this second parser does not, so passing --probe_point
    # on the command line will make this parse fail -- confirm intended use.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # Load a single task (landscape, start state, goal) from disk.
    landscapes, states, goals = [], [], []

    landscape_, state_, goal_ = load_task(rpl_config.task_pth, display=False)

    landscapes.append(landscape_)
    states.append(state_)
    goals.append(goal_)

    states = jnp.array(states)
    goals = jnp.array(goals)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    # First Dense layer parameters: bias and kernel.
    bias1 = np.array(tree_leaves[0])
    mat1 = np.array(tree_leaves[1])
    print("mat1.shape: ", mat1.shape)

    # Split the kernel into observation rows and recurrent (intrinsic) rows.
    # NOTE(review): 128 is hard-coded and presumably equals rpl_config.nn_size;
    # rows 128:137 cover only 9 of the 10 observation dims -- confirm both.
    mat_obs = np.array(tree_leaves[1])[128:137,:]
    mat_intr = np.array(tree_leaves[1])[0:128,:]
    print("mat_obs.shape: ", mat_obs.shape)


    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()

    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs

    print("shape of concat_obs: ", concat_obs[0].shape)

    rnn_state = model.initial_state(GE.num_envs)
    step_count = 0
    trajectories = []  # agent grid positions (env 0), one per step
    goal_record = []  # goal-reached flags (env 0), one per step

    HS_trajectory = [rnn_state[0]]  # hidden-state trajectory of env 0

    # Per step: 0 if the agent's grid position changed, 1 if it stayed put.
    NEA_seq = [0]

    NEAP = np.dot(concat_obs[0][0:9], mat_obs)
    NEAP_zero = np.zeros_like(NEAP)

    NEAPs = []  # observation-induced perturbation vector per step

    intr_field = []  # intrinsic drift vector per step

    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        step_count += 1

        # Concatenate rnn_state[0] and concat_obs[0] into one input vector
        new_vector = np.concatenate((rnn_state[0], concat_obs[0]))
        # Multiply by mat1 (plus bias) to get the length-128 next state
        result_vector = np.dot(new_vector, mat1) + bias1
        result_vector = np.tanh(result_vector)
        # Same update computed from the recurrent part alone (no observation).
        intr_vector = np.dot(rnn_state[0], mat_intr) + bias1
        intr_vector = np.tanh(intr_vector)

        # NEAP: net effect of the observation on the next hidden state.
        # NEAP = np.dot(concat_obs[0][0:9], mat_obs)
        NEAP = result_vector - intr_vector
        # normalize NEAP
        # if np.linalg.norm(NEAP) > 0:
        #     NEAP = 3* NEAP / np.linalg.norm(NEAP)
        NEAPs.append(NEAP)
        intr_field.append(intr_vector - rnn_state[0])

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

        # print("diff between rnn_state and result_vector: ", np.linalg.norm(rnn_state[0] - result_vector))

        # Record whether this step's action was effective (position changed).
        if len(trajectories)>0:
            if np.array(GE.batched_states[0])[0]!=trajectories[-1][0] or np.array(GE.batched_states[0])[1]!=trajectories[-1][1]:
                NEA_seq.append(0)
                # NEAPs.append(NEAP_zero)
            elif np.array(GE.batched_states[0])[0]==trajectories[-1][0] and np.array(GE.batched_states[0])[1]==trajectories[-1][1]:
                NEA_seq.append(1)
                # NEAPs.append(NEAP)

        trajectories.append(np.array(GE.batched_states[0]))
        goal_record.append(batched_goal_reached[0])
        HS_trajectory.append(np.array(rnn_state[0]))

    print("shape of trajectories: ", np.array(trajectories).shape)
    print("shape of goal_record: ", np.array(goal_record).shape)
    print("shape of rnn_state: ", rnn_state.shape)
    print("shape of HS_trajectory: ", np.array(HS_trajectory).shape)
    print("shape of NEA_seq: ", np.array(NEA_seq).shape)

    print("sum of NEA_seq: ", np.sum(np.array(NEA_seq)))

    # Project into the intrinsic PCA basis.  Adding intrinsic_pca.mean_
    # before transform cancels PCA's mean-centering, so the difference
    # vectors are only rotated, not translated.
    HS_pca = intrinsic_pca.transform(np.array(HS_trajectory))
    NEAP_pca = intrinsic_pca.transform(np.array(NEAPs) + intrinsic_pca.mean_)
    intr_pca = intrinsic_pca.transform(np.array(intr_field) + intrinsic_pca.mean_)


    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Compute a color value for each point
    colors = np.linspace(0, 1, len(HS_pca))
    # Draw the 3D polyline, coloring each segment by time
    for i in range(len(HS_pca) - 1):
        ax.plot([HS_pca[i, 0], HS_pca[i+1, 0]], [HS_pca[i, 1], HS_pca[i+1, 1]], [HS_pca[i, 2], HS_pca[i+1, 2]], color=plt.cm.RdYlBu(colors[i]))
    plt.show()


    # Closely inspect the data in the [start_i:end_i] window:
    # 1. the intrinsic vector field at each point
    # 2. the observation-perturbation vector at each point
    start_i = len(NEAP_pca)-100
    start_i = 0
    end_i = len(NEAP_pca)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Compute a color value for each point
    colors = np.linspace(0, 1, len(HS_pca))
    # ax.scatter(HS_pca[start_i:end_i, 0], HS_pca[start_i:end_i, 1], HS_pca[start_i:end_i, 2], c=plt.cm.RdYlBu(colors[start_i:end_i]), s=6)
    # ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:, 2], c='g', s=140)
    # Draw the 3D polyline, coloring each segment by time
    for i in range(start_i, end_i - 1):
        ax.plot([HS_pca[i, 0], HS_pca[i+1, 0]], [HS_pca[i, 1], HS_pca[i+1, 1]], [HS_pca[i, 2], HS_pca[i+1, 2]], color=plt.cm.RdYlBu(colors[i]), alpha=0.3)
        # ax.plot([HS_pca[i, 0], HS_pca[i+1, 0]], [HS_pca[i, 1], HS_pca[i+1, 1]], [HS_pca[i, 2], HS_pca[i+1, 2]], color='b', alpha=0.05)
    # Arrows: NEAP (time-colored) and intrinsic drift (green) at each state.
    for i in range(start_i, end_i):
        ax.quiver(HS_pca[i, 0], HS_pca[i, 1], HS_pca[i, 2], NEAP_pca[i, 0], NEAP_pca[i, 1], NEAP_pca[i, 2], color=plt.cm.RdYlBu(colors[i]), length=0.5, arrow_length_ratio=0.3)
        ax.quiver(HS_pca[i, 0], HS_pca[i, 1], HS_pca[i, 2], intr_pca[i, 0], intr_pca[i, 1], intr_pca[i, 2], color='g', length=0.5, arrow_length_ratio=0.3)

    # # Build a custom RGB texture
    # rgb_image = np.zeros((100, 100, 3))
    # rgb_image[:, :, 0] = 1.0  # red channel
    # rgb_image[:, :, 1] = 0.5  # green channel
    # rgb_image[:, :, 2] = 0.0  # blue channel

    # # Build the X, Y, Z coordinate grids
    # X, Y, Z = np.meshgrid(np.linspace(-1, 1, 100), np.linspace(-1, 1, 100), np.linspace(-1, 1, 100))

    # # Flatten Z into a 2D array
    # Z_flat = Z.flatten()

    # # Draw the 3D surface with the custom RGB texture as facecolors
    # ax.plot_surface(X, Y, Z_flat.reshape(X.shape), facecolors=rgb_image)


    plt.show()

            
# Script entry point.
if __name__ == "__main__":
    main()