from grid_env_ideal_obs_repeat_task_free_run import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json

from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
import matplotlib as mpl

import matplotlib.animation as animation
from sklearn.cluster import KMeans

from openTSNE import TSNE
from sklearn.manifold import Isomap

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Jitted forward pass of the recurrent model.

    Args:
        variables: parameter pytree for the model (as returned by `load_weights`).
        state: recurrent hidden state.
        x: batched observation input.
        model: the network object; marked static for `jax.jit` so each distinct
            model instance triggers its own compilation/trace.

    Returns:
        Whatever `model.apply` returns — callers below unpack it as
        `(new_rnn_state, output_logits)`.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest entry in logits `y`."""
    return jnp.argmax(y)
# Batched version: maps get_action over the leading (environment) axis.
get_action_vmap = jax.vmap(get_action)

def rand_normal_like_tree(key: Any, params, std: float = 1.0, batch_shape: Tuple = ()):
    """Return a pytree like `params` where every leaf is i.i.d. N(0, std**2) noise.

    Args:
        key: JAX PRNG key; split once per leaf so leaves get independent noise.
        params: pytree whose leaf shapes and dtypes the noise mirrors.
        std: standard deviation of the sampled noise (std=0 yields all zeros).
        batch_shape: optional leading dims prepended to every leaf shape,
            e.g. batch_shape=(bs,) yields leaves of shape (bs, *leaf.shape).
            Default () reproduces the leaf shapes exactly.

    Returns:
        A pytree with the same tree structure as `params`.
    """
    leaves = jax.tree_util.tree_leaves(params)
    treedef = jax.tree_util.tree_structure(params)

    # One independent subkey per leaf.
    all_keys = jax.random.split(key, num=len(leaves))
    noise = jax.tree_util.tree_map(
        # BUGFIX: `batch_shape` was previously accepted but ignored; it is now
        # prepended to each leaf's shape as documented.
        lambda g, k: std * jax.random.normal(
            k, shape=tuple(batch_shape) + tuple(g.shape), dtype=g.dtype),
        params, jax.tree_util.tree_unflatten(treedef, all_keys))

    return noise


# load landscape and states from file
def load_task(pth = "./logs/task.json"):
    """Load a saved task description from a JSON file.

    The file is expected to contain the keys "data" (the landscape),
    "state" (start position) and "goal" (goal position).

    Args:
        pth: path to the task JSON file.

    Returns:
        Tuple of (landscape, state, goal) exactly as stored in the file.
    """
    with open(pth, "r") as f:
        data = json.load(f)
    landscape, state, goal = data["data"], data["state"], data["goal"]
    print("state: ", state)
    print("goal: ", goal)
    print("landscape: ", landscape)
    return landscape, state, goal

# Mouse/keyboard event state for GUI callbacks. Unused in the visible code —
# presumably consumed by an interactive visualization elsewhere; confirm.
event_type = ""
event_x = 0
event_y = 0

def main():

    """Replay a trained recurrent agent on a grid task and log its hidden states.

    Two rollouts of `life_duration` steps are run on the same task; the second
    starts from a slightly perturbed initial hidden state. Hidden-state
    trajectories and the "believed quadrant" (argmax similarity against four
    reference feature vectors) are recorded for both rollouts.

    NOTE(review): the bare `return` midway through makes the trailing analysis
    (Lyapunov-exponent estimate, quadrant plots, PCA/KMeans 3-D animation)
    unreachable — it looks like a debugging toggle; confirm before relying on it.
    """
    rpl_config = ReplayConfig()

    # Every CLI flag defaults to the matching ReplayConfig field.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Copy the parsed values back onto the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # cv2.namedWindow("rnn_state_img", 0)
    # cv2.namedWindow("img", cv2.WINDOW_GUI_NORMAL)

    # NOTE(review): k1 is never used as a PRNG key; it is overwritten with a
    # PCA component index in the (unreachable) plotting code below.
    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # make a random version of params with the same shape of params
    # (params_r is only referenced by the commented-out forward passes below)
    key = jax.random.PRNGKey(npr.randint(0, 1000000))
    params_r = rand_normal_like_tree(key, params, std=1.0, batch_shape=(1,))

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    # NOTE(review): leaf_1 / leaf_3 are copied but never used afterwards.
    leaf_1 = jnp.copy(tree_leaves[1])
    leaf_3 = jnp.copy(tree_leaves[3])

    """ create landscape
    """
    # Use a random maze unless a saved task file exists at task_pth.
    random_task = True
    # check if file on rpl_config.task_pth exists
    if os.path.isfile(rpl_config.task_pth):
        random_task = False

    if random_task:
        # NOTE(review): in this branch `state` and `goal` are never assigned,
        # but they are read unconditionally below when resetting for the
        # perturbed rollout — that path would raise NameError. The script
        # apparently assumes a task file is always present; confirm.
        landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
        landscape = padding_landscapes(landscape, width=12, height=12)
    else:
        landscape, state, goal = load_task(pth = rpl_config.task_pth)
        landscape = [landscape]

    print("landscape :")
    print(landscape)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # (the vanilla RNN's Dense_0 input is hidden_size + 10 observation dims)
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscape, width = 12, height = 12, num_envs_per_landscape = 1)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    if not random_task:
        # set states of GE
        GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
        GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
        # set goals of GE
        GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
        GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)

    concat_obs = GE.concat_obs
    # NOTE(review): the two *_zero placeholders below are never used.
    concat_obs_zero = jnp.zeros_like(concat_obs)
    batched_goal_reached_zero = jnp.zeros_like(GE.batched_goal_reached)

    rnn_state = model.initial_state(GE.num_envs)
    step_count = 0
    render_id = 0

    trajectory = []
    rnn_state_waterfall = []      # hidden states of the unperturbed rollout
    rnn_state_waterfall_new = []  # hidden states of the perturbed rollout

    """ rnn state visualization
    """
    # Canvas geometry for the hidden-state visualization; rnn_state_img0 is
    # created but not drawn to in the visible code.
    neuron_interval = 10
    neuron_interval1 = 6
    canvas_width = 400 + (rnn_state[0].shape[0]+4)*neuron_interval
    canvas_height = 900
    vanilla_vertical = 200
    contribution_height = 400
    # create a canvas to hold rnn_state visualization
    rnn_state_img0 = np.zeros((canvas_height, canvas_width, 3), dtype=np.uint8)

    step_by_step = False
    manual_action = 0

    # Indices of hidden units selected offline (name suggests units whose
    # cluster-mean variance is < 0.1 — TODO confirm); these units form the
    # "static feature" compared against the four reference vectors below.
    var_clt_mean_less_than_0_1 = [1, 5, 6, 11, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 27, 28, 34, 44, 46, 57, 62, 65, 68, 80, 82, 83, 88, 89, 96, 98, 100, 102, 104, 108, 113, 116, 120, 122]
    # Reference hidden-state features, one per behavioral "quadrant".
    rnn_feature_0 =  [0.9999582767486572, -0.9831897020339966, 0.9996467232704163, -0.9994123578071594, -0.999851405620575, -0.9999927878379822, 0.999983549118042, 0.9612636566162109, 0.9998806715011597, 0.9932894706726074, -0.9997431635856628, 0.9999991059303284, 0.9999998807907104, -0.9999634027481079, 0.9999784231185913, 0.9996755123138428, -0.9992828369140625, 0.8952787518501282, -0.9999989867210388, 0.9748570919036865, 0.9935382008552551, -0.9999995827674866, 0.9871329665184021, 0.9740328788757324, -0.9953449368476868, 0.9701042771339417, -0.9991737604141235, -0.9999988675117493, -0.9990499019622803, -0.999997079372406, 0.9949736595153809, 0.9822999238967896, -0.9999629855155945, -0.9999940991401672, -0.9986229538917542, 0.9996981024742126, 0.9999974370002747, 0.9871447682380676, -0.9999998807907104]
    rnn_feature_1 =  [-0.9998412728309631, 0.9671010375022888, -0.9999982714653015, 0.9992090463638306, 0.9994693994522095, 0.9999602437019348, -0.9925903081893921, -0.9984927773475647, -0.9941282868385315, 0.9965940713882446, 0.9773151278495789, -0.9977096915245056, -0.9996675252914429, 0.9999555349349976, -0.9999988675117493, -0.9978929758071899, 0.9999995231628418, -0.9997955560684204, 0.9996235966682434, 0.9973439574241638, -0.9999579787254333, 0.9999728798866272, 0.9823980927467346, -0.9997960329055786, 0.9941484332084656, -0.9999427795410156, 0.9995871186256409, 0.9999239444732666, 0.9942917823791504, 0.9999849796295166, -0.9987909197807312, -0.9999961256980896, 0.9863384366035461, 0.9999999403953552, 0.9593451619148254, -0.9977843165397644, -0.9793924689292908, -0.9543303847312927, 0.9943166375160217]
    rnn_feature_2 =  [0.9999920725822449, -0.9973777532577515, 0.9999567866325378, -0.9910239577293396, -0.9999980926513672, -0.9999908804893494, 0.9994378089904785, 0.9657203555107117, 0.9999774098396301, -0.9924333691596985, -0.9999697804450989, 0.9999998807907104, 0.9999997615814209, -0.9969027638435364, 0.9999698996543884, 0.9999940991401672, -0.999999463558197, 0.9991713166236877, -0.9999998807907104, -0.9957963228225708, 0.9988913536071777, -0.9999998807907104, -0.9945865869522095, 0.9999521970748901, -0.9996950626373291, 0.9985164403915405, -0.998382568359375, -0.9999978542327881, -0.9999244809150696, -0.9996201395988464, 0.999308168888092, 0.9999228715896606, -0.9999930262565613, -0.9999990463256836, -0.9861818552017212, 0.9988118410110474, 0.9999998807907104, 0.9948789477348328, -0.9999997615814209]
    rnn_feature_3 =  [-0.9705339074134827, 0.9958152174949646, -0.9995390772819519, 0.9999624490737915, 0.999870240688324, 0.9999515414237976, -0.9939287304878235, -0.9997881054878235, -0.9936801791191101, -0.977760910987854, 0.9972586035728455, -0.9999999403953552, -0.9999999403953552, 0.9999993443489075, -0.9999997019767761, -0.9981520771980286, 0.994085431098938, -0.9939032793045044, 0.9999999403953552, -0.991743266582489, -0.9992260932922363, 0.9999999403953552, -0.9556638598442078, -0.9771517515182495, 0.9983199238777161, -0.9999507069587708, 0.9929742217063904, 0.9999997615814209, 0.9999983906745911, 0.9997593760490417, -0.9996225833892822, -0.9998718500137329, 0.9999980330467224, 0.9999902844429016, 0.9994619488716125, -0.9978642463684082, -0.9999993443489075, -0.9995915293693542, 0.9999999403953552]


    # NOTE(review): lv_pop is assigned three times; only the last list
    # survives, and it is only used by code that is commented out below.
    lv_pop = [0, 1, 2, 3, 5, 7, 8, 9, 11, 12, 14, 15, 16, 17, 18, 19, 21, 23, 24, 26, 27, 29, 31, 32, 33, 36, 37, 40, 41, 42, 43, 44, 45, 47, 48, 49, 51, 52, 53, 55, 57, 61, 62, 65, 66, 67, 69, 70, 72, 74, 75, 76, 77, 80, 81, 82, 83, 84, 87, 88, 89, 90, 92, 93, 95, 96, 97, 98, 99, 103, 105, 106, 107, 108, 109, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 123, 124, 125, 126]
    lv_pop = [3, 5, 8, 14, 15, 16, 18, 19, 21, 23, 24, 26, 27, 29, 36, 39, 40, 44, 52, 53, 55, 65, 66, 69, 74, 75, 76, 77, 80, 81, 84, 89, 90, 92, 93, 95, 96, 98, 99, 104, 105, 106, 108, 109, 111, 112, 113, 115, 117, 118, 121, 123, 125, 126]
    lv_pop = [1, 3, 9, 10, 13, 16, 18, 19, 20, 21, 22, 23, 30, 31, 34, 37, 38, 39, 40, 41, 43, 45, 46, 49, 52, 55, 56, 57, 60, 61, 64, 65, 67, 71, 73, 74, 75, 76, 81, 82, 83, 84, 86, 88, 90, 94, 96, 97, 98, 99, 102, 103, 104, 105, 106, 109, 114, 116, 118, 119, 122, 126, 127]

    # NOTE(review): `pipe` is never used, and `pca` is re-created after the
    # early return below.
    pca = PCA()
    pipe = Pipeline([('scaler', StandardScaler()), ('pca', pca)])

    quadrant_beleived_old = 0
    quadrant_list = []

    quadrant_beleived_old1 = 0
    quadrant_list1 = []

    test_period = rpl_config.life_duration

    # get original trajectory
    for t_ in range(test_period):

        step_count += 1

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        # rnn_state, y1 = model_forward(params_r, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions)

        # print(concat_obs)

        rnn_state_waterfall.append(rnn_state[0].tolist())

        # compute the variance of rnn_state_waterfall in a window of 20 steps
        # (result is currently unused)
        rnn_state_window = rnn_state_waterfall[-20:]
        rnn_state_window_var = np.var(rnn_state_window, axis=0)

        # print("rnn_state_window_var: ", rnn_state_window_var)
        # print("")

        # Dot-product similarity of the selected low-variance units against
        # each of the four references; the argmax is the "believed quadrant".
        static_feature = rnn_state[0, var_clt_mean_less_than_0_1]
        sim_to_feature0 = np.dot(static_feature, rnn_feature_0)
        sim_to_feature1 = np.dot(static_feature, rnn_feature_1)
        sim_to_feature2 = np.dot(static_feature, rnn_feature_2)
        sim_to_feature3 = np.dot(static_feature, rnn_feature_3)
        # print("sim_to_features: ", sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3)

        quadrant_beleived = np.argmax([sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3])

        # Record only quadrant transitions, not every step.
        if quadrant_beleived != quadrant_beleived_old:
            quadrant_list.append(quadrant_beleived)
            quadrant_beleived_old = quadrant_beleived

    rnn_state = model.initial_state(GE.num_envs)
    # add a small perturbation to the rnn_state
    rnn_state = rnn_state.at[0,0].set(0.01)
    # rnn_state = jax.tree_map(lambda x: x + 0.01, rnn_state)
    # print("rnn_state :", rnn_state)

    # Reset the environment to the exact same task for the perturbed rollout.
    # NOTE(review): `state`/`goal` are undefined here when random_task is True
    # (see the landscape-creation branch above).
    GE.reset()
    # set states of GE
    GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
    GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
    # set goals of GE
    GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
    GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs

    # get perturbed trajectory
    for t_ in range(test_period):

        step_count += 1

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        # rnn_state, y1 = model_forward(params_r, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions)

        rnn_state_waterfall_new.append(rnn_state[0].tolist())

        # compute the variance of rnn_state_waterfall in a window of 20 steps
        # NOTE(review): this window reads rnn_state_waterfall (the first,
        # unperturbed rollout) rather than rnn_state_waterfall_new — probably
        # a copy-paste slip, though the result is unused either way.
        rnn_state_window = rnn_state_waterfall[-20:]
        rnn_state_window_var = np.var(rnn_state_window, axis=0)

        # print("rnn_state_window_var: ", rnn_state_window_var)
        # print("")

        static_feature = rnn_state[0, var_clt_mean_less_than_0_1]
        sim_to_feature0 = np.dot(static_feature, rnn_feature_0)
        sim_to_feature1 = np.dot(static_feature, rnn_feature_1)
        sim_to_feature2 = np.dot(static_feature, rnn_feature_2)
        sim_to_feature3 = np.dot(static_feature, rnn_feature_3)
        # print("sim_to_features: ", sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3)

        quadrant_beleived = np.argmax([sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3])

        if quadrant_beleived != quadrant_beleived_old1:
            quadrant_list1.append(quadrant_beleived)
            quadrant_beleived_old1 = quadrant_beleived

    rnn_state_waterfall = np.array(rnn_state_waterfall)
    rnn_state_waterfall_new = np.array(rnn_state_waterfall_new)



    # # extract the items in the rnn_state_waterfall with idx in lv_pop
    # rnn_state_waterfall1 = []
    # rnn_state_waterfall_new1 = []
    # for s in rnn_state_waterfall:
    #     rnn_state_waterfall1.append(s[lv_pop])
    # for s in rnn_state_waterfall_new:
    #     rnn_state_waterfall_new1.append(s[lv_pop])
    # rnn_state_waterfall = np.array(rnn_state_waterfall1)
    # rnn_state_waterfall_new = np.array(rnn_state_waterfall_new1)

    # print(rnn_state_waterfall.shape)
    # print(rnn_state_waterfall_new.shape)

    # # FFT
    # fft = np.fft.fft(rnn_state_waterfall.T)
    # print("shape of fft: ", fft.shape)
    # fft = np.abs(fft)[:,1:-1]
    # fft = fft / fft.max()
    # freq = np.fft.fftfreq(fft.shape[1], d=1)

    # # traverse all the ffts and conbine the plots into a grid of plots
    # plots = []
    # for n in range(fft.shape[0]):
    #     # get the image of plt.plot(freq[0:fft.shape[1]//2-1], fft[n,0:fft.shape[1]//2-1])
    #     fig = plt.figure()
    #     plt.plot(freq[0:fft.shape[1]//2-1], fft[n,0:fft.shape[1]//2-1])
    #     plt.xlabel('Frequency')
    #     plt.ylabel('Amplitude')
    #     # set y axis range 0-1
    #     plt.ylim(0, 1)
    #     plt.tight_layout()
    #     # plt.show()
    #     fig.canvas.draw()
    #     # Now we can save it to a numpy array.
    #     data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    #     data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    #     plt.close(fig)
    #     plots.append(data.copy())
    #     # cv2.imshow('data', data)
    #     # cv2.waitKey(0)

    # # combine the plots into a grid of plots
    # plots = np.array(plots)
    # print("shape of plots: ", plots.shape)
    # grid_w = 12
    # grid_h = 12
    # # compute the size of the combined image
    # large_img_width = 480 * grid_w 
    # large_img_height = 640 * grid_h

    # # create an empty array to hold the combined image
    # large_img = np.zeros((large_img_width, large_img_height, 3), dtype=np.uint8)

    # # iterate over every image in the 4-D array
    # for i in range(128):
    #     # compute the image's position in the combined image
    #     row = i // grid_w  # row index
    #     col = i % grid_h   # column index

    #     # get the image data
    #     img = plots[i].copy()
        
    #     # compute the image's top-left coordinates in the combined image
    #     large_img_row_start = row * 480 
    #     large_img_col_start = col * 640

    #     print(i, row, col, large_img_row_start, large_img_col_start)
    #     print("shape of img: ", img.shape)
        
    #     # copy the image data into the combined image
    #     large_img[large_img_row_start:(large_img_row_start+480), 
    #             large_img_col_start:(large_img_col_start+640), :] = img.copy()
    #     # cv2.imshow('data', img)
    #     # cv2.waitKey(0)

    # # resize large_img by factor of 0.3
    # # large_img = cv2.resize(large_img, (0,0), fx=0.3, fy=0.3)
    # cv2.imshow('large_img', large_img)
    # cv2.waitKey(0)
    # # save large_img
    # # cv2.imwrite("./logs/large_img.png", large_img)

    # fft_neuron = fft[:,0:fft.shape[1]//2-1].copy()
    # fft_neuron = fft_neuron / fft_neuron.max()

    # # perform t-SNE on fft_neuron
    # tsne_embedding = TSNE().fit(fft_neuron)

    # def on_click(event):
    #     if event.mouseevent.button == 1:  # left click
    #         x, y = event.mouseevent.xdata, event.mouseevent.ydata
    #         distances = np.sqrt(np.sum((tsne_embedding - [x, y])**2, axis=1))
    #         i = np.argmin(distances)
    #         cv2.putText(plots[i], str(i), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
    #         cv2.imshow('FFT', plots[i])
    #         cv2.waitKey(1)

    # fig, ax = plt.subplots()
    # scatter = ax.scatter(tsne_embedding[:, 0], tsne_embedding[:, 1])
    # scatter.set_picker(True)
    # fig.canvas.mpl_connect('pick_event', on_click)
    # plt.show()
    # # plot the tsne_embedding
    # plt.scatter(tsne_embedding[:, 0], tsne_embedding[:, 1])
    # plt.show()

    # # perform isomap on the rnn_state_opt_log_cluster
    # isomap_ = Isomap(n_components=2)
    # isomap_.fit(fft_neuron)
    # plt.scatter(isomap_.embedding_[:, 0], isomap_.embedding_[:, 1])
    # # title
    # plt.title("isomap")
    # plt.show()

    # NOTE(review): everything below this return is unreachable — presumably
    # a debugging toggle. Confirm before deleting or re-enabling.
    return

    # compute the distance between the two trajectories
    dist = []
    for i in range(rnn_state_waterfall.shape[0]):
        dist.append(np.linalg.norm(rnn_state_waterfall[i] - rnn_state_waterfall_new[i]))
    dist = np.array(dist)
    # print("shape of dist :", dist.shape)
    # Slope of a linear fit to log(distance) estimates how fast the two
    # hidden-state trajectories diverge (a Lyapunov-exponent-style measure).
    lnr = np.log(dist)
    slope, intercept = np.polyfit(list(range(len(lnr))), lnr, deg=1)
    print("slope :", slope)
    # plot lnr
    plt.plot(lnr)
    # name the char "lyapunov exponent"
    plt.title("lyapunov exponent: " + str(slope))
    plt.show()

    plt.plot(dist)
    plt.show()

    # plot quadrant_beleived_old and quadrant_beleived_old1 
    # and show them side-by-side
    plt.subplot(1,2,1)
    plt.plot(quadrant_list)
    plt.subplot(1,2,2)
    plt.plot(quadrant_list1)
    plt.show()

    print("quadrant_list :", quadrant_list)
    print("quadrant_list1 :", quadrant_list1)

    # do PCA on rnn_state_waterfall
    def colorFader(c1,c2,mix=0): #fade (linear interpolate) from color c1 (at mix=0) to c2 (mix=1)
        c1=np.array(mpl.colors.to_rgb(c1))
        c2=np.array(mpl.colors.to_rgb(c2))
        return mpl.colors.to_hex((1-mix)*c1 + mix*c2)
    c1='red' #blue
    c2='blue' #green

    # load spacial trajectory
    rnn_log_pth = "./data/adaptive_trajectory_optimization/rnn_state_opt_log_128_gru.npy"
    rnn_state_opt_log = np.load(rnn_log_pth)
    pca0 = PCA()  
    pca0.fit(rnn_state_opt_log)
    # clustering
    kmeans = KMeans(n_clusters=4, random_state=0).fit(rnn_state_opt_log)
    # get the center of each cluster
    cluster_centers = kmeans.cluster_centers_
    # print("center of each cluster: ", cluster_centers)
    print("shape of cluster_centers: ", cluster_centers.shape)

    pca = PCA()  
    pca.fit(np.array(rnn_state_waterfall))

    # xt = pca0.transform(np.array(rnn_state_waterfall)) 
    # xt1 = pca0.transform(np.array(rnn_state_waterfall_new))

    xt = pca.transform(np.array(rnn_state_waterfall))
    xt1 = pca.transform(np.array(rnn_state_waterfall_new))

    cluster_centers_pca = pca.transform(cluster_centers)

    # Principal-component indices used as the three 3-D plot axes.
    k1 = 0
    k2 = 1
    k3 = 2

    fig = plt.figure()
    ax = plt.axes(projection='3d')
    # plot cluster_centers_pca
    ax.scatter3D(cluster_centers_pca[:, k1], cluster_centers_pca[:, k2], cluster_centers_pca[:, k3], c='red', s=100)

    # Moving markers for the two trajectories (red = original, green = perturbed).
    line, = ax.plot([], [], [], 'ro', markersize=10)
    # set line color
    line.set_color('red')
    line.set_markevery([])
    line1, = ax.plot([], [], [], 'ro', markersize=10)
    # set line color
    line1.set_color('green')
    line1.set_markevery([])
    n = len(rnn_state_waterfall)-1
    # Draw both trajectories as polylines whose color fades from c1 to c2
    # over time.
    for i in range(0,n):
        ax.plot([xt[i, k1], xt[i+1, k1]], [xt[i, k2], xt[i+1, k2]], [xt[i, k3], xt[i+1, k3]], color=colorFader(c1,c2,i/n), linewidth = 1)
        ax.plot([xt1[i, k1], xt1[i+1, k1]], [xt1[i, k2], xt1[i+1, k2]], [xt1[i, k3], xt1[i+1, k3]], color=colorFader(c1,c2,i/n), linewidth = 1)
    # animation update function
    def animate(i): 
        line.set_data(xt[i:i+1, k1], xt[i:i+1, k2])
        line.set_3d_properties(xt[i:i+1, k3])
        line.set_markevery([0])

        line1.set_data(xt1[i:i+1, k1], xt1[i:i+1, k2])
        line1.set_3d_properties(xt1[i:i+1, k3])
        line1.set_markevery([0])
        return line,

    print("xt.shape[0]: ", xt.shape[0])
    ani = animation.FuncAnimation(fig, animate, frames=xt.shape[0], interval=10, blit=False)

    plt.show()


# Script entry point.
if __name__ == "__main__":
    main()