from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *

from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
import matplotlib as mpl

import json
from replay_config import *
import argparse


@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Single jitted forward pass of the recurrent model.

    Args:
        variables: parameter pytree as loaded from the checkpoint.
        state: recurrent hidden state for the batch.
        x: batched observation input.
        model: module exposing ``apply``; marked static so JIT re-traces
            per model instance (the instance must be hashable).

    Returns:
        Whatever ``model.apply`` returns — at the call site in ``main`` this
        is unpacked as ``(new_rnn_state, output_logits)``.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest entry of ``y``."""
    return y.argmax()

# Batched variant: apply the greedy choice across the leading axis.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json"):
    """Read a task description from a JSON file.

    The file must contain the keys ``data`` (landscape), ``state`` (start
    position) and ``goal``; each value is echoed to stdout.

    Returns:
        Tuple ``(landscape, state, goal)``.
    """
    with open(pth, "r") as fh:
        payload = json.load(fh)
    landscape = payload["data"]
    state = payload["state"]
    goal = payload["goal"]
    print("state: ", state)
    print("goal: ", goal)
    print("landscape: ", landscape)
    return landscape, state, goal

def colorFader(c1,c2,mix=0):
    """Linearly interpolate between two colors, returning a hex string.

    ``mix=0`` yields ``c1``, ``mix=1`` yields ``c2``; intermediate values
    blend in RGB space.
    """
    rgb_start = np.array(mpl.colors.to_rgb(c1))
    rgb_end = np.array(mpl.colors.to_rgb(c2))
    blended = (1 - mix) * rgb_start + mix * rgb_end
    return mpl.colors.to_hex(blended)

def save_rnn_states(states, pth = "./logs/states.json"):
    """Dump a sequence of RNN hidden states to JSON under the key ``data``."""
    payload = {"data": np.array(states).tolist()}
    with open(pth, "w") as fh:
        json.dump(payload, fh)

def save_everything(states, landscape, start_x, start_y, goal_x, goal_y, trajectory, pth = "./logs/full_report.json"):
    """Write a full episode report to a JSON file.

    The report bundles the task layout (``landscape``), the start and goal
    positions, the recorded hidden states (``data``) and the agent's
    ``trajectory``.
    """
    report = {
        "landscape": [landscape],
        "state": [start_x, start_y],
        "goal": [goal_x, goal_y],
        "data": np.array(states).tolist(),
        "trajectory": trajectory,
    }
    with open(pth, "w") as fh:
        json.dump(report, fh)

# Gradient endpoint colors used with colorFader (e.g. for trajectory fades).
c1='red' # fade start color (mix=0)
c2='blue' # fade end color (mix=1)

def main():

    """Replay a trained recurrent agent on a fixed task and visualize it.

    Loads model weights and a task (landscape, start, goal) from disk, then
    loops forever: the agent lives for ``life_duration`` steps, the RNN
    hidden state is recorded each step, and after each lifetime the hidden
    state "waterfall" and its intra-task variance are displayed before the
    environment and recurrent state are reset.
    """
    rpl_config = ReplayConfig()

    # Every CLI flag defaults to the current ReplayConfig value and is
    # written straight back below, so rpl_config stays the single source
    # of truth for configuration.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    # NOTE(review): --show_trj defaults to and is stored into video_output;
    # the flag name looks stale — confirm which name is intended.
    parser.add_argument("--show_trj", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--view_3d", type=str, default=rpl_config.view_3d)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.show_trj
    rpl_config.life_duration = args.life_duration
    rpl_config.view_3d = args.view_3d

    # Flag 0 (cv2.WINDOW_NORMAL) makes the waterfall window resizable.
    cv2.namedWindow("rnn_state_waterfall", 0)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)
    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)
    
    # NOTE(review): the leaf indices and row slices below assume a specific
    # checkpoint layout — leaf 1 as an input/recurrent weight matrix with
    # rows 0:128 presumably the hidden-to-hidden part and row 132 the
    # weight row of the reward input. TODO confirm against the checkpoint.
    leaf_1 = jnp.copy(tree_leaves[1])
    leaf_3 = jnp.copy(tree_leaves[3])

    # # convert leaf_1 to np array
    leaf_1_np = np.array(leaf_1[0:128,:])
    print("shape of leaf_1_np: ", leaf_1_np.shape)

    reward_signature = np.array(leaf_1[132,:])

    print("shape of reward_signature: ", reward_signature.shape)

    if rpl_config.nn_type == "vanilla":
        # get elements of params
        tree_leaves = jax.tree_util.tree_leaves(params)
        for i in range(len(tree_leaves)):
            print("shape of leaf ", i, ": ", tree_leaves[i].shape)
        
        # print("params ", params)
        
        # connection matrix of the vanilla rnn
        leaf_1 = jnp.copy(tree_leaves[0])       # bias from input to hidden
        leaf_2 = jnp.copy(tree_leaves[1])       # weights from input to hidden
        leaf_3 = jnp.copy(tree_leaves[2])       # bias from hidden to action
        leaf_4 = jnp.copy(tree_leaves[3])       # weights from hidden to action
        leaf_1 = np.array(leaf_1)
        leaf_2 = np.array(leaf_2)
        leaf_3 = np.array(leaf_3)
        leaf_4 = np.array(leaf_4)

        print("leaf_1: ", leaf_1.shape)
        print("leaf_2: ", leaf_2.shape)
        print("leaf_3: ", leaf_3.shape)
        print("leaf_4: ", leaf_4.shape)


    """ load task
    """
    landscape, state, goal = load_task(pth=rpl_config.task_pth)

    """ create agent
    """
    # NOTE(review): any nn_type other than "vanilla"/"gru" leaves `model`
    # undefined and crashes below — consider an explicit error.
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
        # model = RNN_analyse(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # The +10 presumably accounts for the observation width appended to the
    # hidden state — TODO confirm against the RNN definition.
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == model.hidden_dims + 10

    """ create grid env
    """
    start_time = time.time()
    # NOTE(review): grid size is hard-coded to 12x12 and does not use
    # rpl_config.map_size — confirm this is intentional.
    GE = GridEnv(landscapes = [landscape], width = 12, height = 12, num_envs_per_landscape = 1)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # Overwrite the freshly-reset start/goal with the ones loaded from the
    # task file, then recompute the derived caches the env keeps.
    # set states of GE
    GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
    GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
    # set goals of GE
    GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
    GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)

    # Seed the JAX PRNG from numpy's global RNG (non-reproducible runs).
    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))
    concat_obs = GE.concat_obs
    rnn_state = model.initial_state(GE.num_envs)

    # result holders
    trajectory = []                      # pixel-space path points for drawing
    rnn_state_waterfall = []             # per-step hidden states (env 0)
    reward_perturbation_waterfall = []   # NOTE(review): never filled; only used in commented-out analysis
    key_frames = []                      # renders captured at each goal hit

    # NOTE(review): pca/pipe are unused — only referenced by the
    # commented-out 3-D analysis further down.
    pca = PCA()
    pipe = Pipeline([('scaler', StandardScaler()), ('pca', pca)])

    # var_clt_mean_less_than_0_1 = [1, 5, 6, 11, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 27, 28, 34, 44, 46, 57, 62, 65, 68, 80, 82, 83, 88, 89, 96, 98, 100, 102, 104, 108, 113, 116, 120, 122]
    # # 1, -1
    # rnn_feature_0 =  [0.9999582767486572, -0.9831897020339966, 0.9996467232704163, -0.9994123578071594, -0.999851405620575, -0.9999927878379822, 0.999983549118042, 0.9612636566162109, 0.9998806715011597, 0.9932894706726074, -0.9997431635856628, 0.9999991059303284, 0.9999998807907104, -0.9999634027481079, 0.9999784231185913, 0.9996755123138428, -0.9992828369140625, 0.8952787518501282, -0.9999989867210388, 0.9748570919036865, 0.9935382008552551, -0.9999995827674866, 0.9871329665184021, 0.9740328788757324, -0.9953449368476868, 0.9701042771339417, -0.9991737604141235, -0.9999988675117493, -0.9990499019622803, -0.999997079372406, 0.9949736595153809, 0.9822999238967896, -0.9999629855155945, -0.9999940991401672, -0.9986229538917542, 0.9996981024742126, 0.9999974370002747, 0.9871447682380676, -0.9999998807907104]
    # # 1, 1
    # rnn_feature_1 =  [-0.9998412728309631, 0.9671010375022888, -0.9999982714653015, 0.9992090463638306, 0.9994693994522095, 0.9999602437019348, -0.9925903081893921, -0.9984927773475647, -0.9941282868385315, 0.9965940713882446, 0.9773151278495789, -0.9977096915245056, -0.9996675252914429, 0.9999555349349976, -0.9999988675117493, -0.9978929758071899, 0.9999995231628418, -0.9997955560684204, 0.9996235966682434, 0.9973439574241638, -0.9999579787254333, 0.9999728798866272, 0.9823980927467346, -0.9997960329055786, 0.9941484332084656, -0.9999427795410156, 0.9995871186256409, 0.9999239444732666, 0.9942917823791504, 0.9999849796295166, -0.9987909197807312, -0.9999961256980896, 0.9863384366035461, 0.9999999403953552, 0.9593451619148254, -0.9977843165397644, -0.9793924689292908, -0.9543303847312927, 0.9943166375160217]
    # # -1, -1
    # rnn_feature_2 =  [0.9999920725822449, -0.9973777532577515, 0.9999567866325378, -0.9910239577293396, -0.9999980926513672, -0.9999908804893494, 0.9994378089904785, 0.9657203555107117, 0.9999774098396301, -0.9924333691596985, -0.9999697804450989, 0.9999998807907104, 0.9999997615814209, -0.9969027638435364, 0.9999698996543884, 0.9999940991401672, -0.999999463558197, 0.9991713166236877, -0.9999998807907104, -0.9957963228225708, 0.9988913536071777, -0.9999998807907104, -0.9945865869522095, 0.9999521970748901, -0.9996950626373291, 0.9985164403915405, -0.998382568359375, -0.9999978542327881, -0.9999244809150696, -0.9996201395988464, 0.999308168888092, 0.9999228715896606, -0.9999930262565613, -0.9999990463256836, -0.9861818552017212, 0.9988118410110474, 0.9999998807907104, 0.9948789477348328, -0.9999997615814209]
    # # -1, 1
    # rnn_feature_3 =  [-0.9705339074134827, 0.9958152174949646, -0.9995390772819519, 0.9999624490737915, 0.999870240688324, 0.9999515414237976, -0.9939287304878235, -0.9997881054878235, -0.9936801791191101, -0.977760910987854, 0.9972586035728455, -0.9999999403953552, -0.9999999403953552, 0.9999993443489075, -0.9999997019767761, -0.9981520771980286, 0.994085431098938, -0.9939032793045044, 0.9999999403953552, -0.991743266582489, -0.9992260932922363, 0.9999999403953552, -0.9556638598442078, -0.9771517515182495, 0.9983199238777161, -0.9999507069587708, 0.9929742217063904, 0.9999997615814209, 0.9999983906745911, 0.9997593760490417, -0.9996225833892822, -0.9998718500137329, 0.9999980330467224, 0.9999902844429016, 0.9994619488716125, -0.9978642463684082, -0.9999993443489075, -0.9995915293693542, 0.9999999403953552]
    
    # Indices of hidden units with low cross-task variance, plus four
    # pre-recorded hidden-state snapshots ("features") restricted to those
    # units. The distance of the current hidden state to each feature is
    # printed whenever the goal is reached.
    var_clt_mean_less_than_0_1 = [0, 1, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 39, 41, 43, 44, 46, 49, 52, 54, 57, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 71, 74, 77, 79, 80, 82, 83, 84, 85, 86, 88, 89, 90, 91, 92, 96, 97, 98, 99, 100, 101, 102, 103, 104, 106, 107, 108, 109, 110, 113, 115, 116, 118, 120, 121, 122, 123, 124, 125, 126]
    rnn_feature_0 =  [-0.9461527466773987, 0.9999582767486572, -0.975846529006958, -0.9906558990478516, -0.9831897020339966, 0.9996467232704163, 0.9916031956672668, 0.9968320727348328, -0.9124768972396851, -0.9994123578071594, -0.8605355620384216, -0.999851405620575, -0.9999927878379822, 0.999983549118042, 0.9612636566162109, 0.9998806715011597, 0.9932894706726074, -0.9997431635856628, -0.9999997019767761, 0.9999991059303284, 0.9999998807907104, -0.9999634027481079, 0.9999784231185913, 0.9996755123138428, -0.9992828369140625, 0.8932662010192871, -0.9987356662750244, 0.9904150366783142, -0.999648928642273, 0.9468201994895935, 0.8952787518501282, -0.9989546537399292, -0.9982856512069702, 0.9999709129333496, -0.9981957077980042, -0.9974076151847839, -0.9678183197975159, -0.9999989867210388, 0.9748570919036865, -0.9999744892120361, 0.9587243795394897, 0.9830151200294495, 0.9935382008552551, -0.9996662139892578, -0.999999463558197, -0.9789413213729858, -0.9999995827674866, -0.8860974311828613, 0.9345378875732422, 0.9871329665184021, 0.8611211180686951, -0.9988968968391418, 0.9740328788757324, 0.995445966720581, 0.9981663227081299, -0.9995632767677307, 0.9999988675117493, -0.8693735599517822, -0.9953449368476868, 0.9701042771339417, -0.9991737604141235, -0.9907732009887695, -0.990005612373352, 0.9999998807907104, -0.9999988675117493, -0.9990499019622803, -0.946376621723175, 0.9362999200820923, -0.9999972581863403, -0.999997079372406, 0.9736419916152954, 0.9949736595153809, -0.964794933795929, 0.9822999238967896, -0.970579206943512, -0.9999629855155945, -0.999987781047821, -0.9999940991401672, -0.9361556768417358, -0.9990412592887878, -0.9986229538917542, -0.9984415769577026, 0.9999998211860657, 0.9996981024742126, -0.9994224309921265, 0.9999974370002747, 0.9792995452880859, 0.9871447682380676, 0.9999986886978149, -0.9999998807907104, 0.9999998807907104, 0.9490108489990234, 0.9829628467559814, -0.9778906106948853]
    rnn_feature_1 =  [-0.9283203482627869, -0.9998435378074646, -0.9638441205024719, 0.9988398551940918, 0.967153012752533, -0.9999982714653015, -0.9427096843719482, -0.9438421130180359, 0.9670294523239136, 0.999212384223938, -0.9875056743621826, 0.9994784593582153, 0.9999609589576721, -0.9926629662513733, -0.998508095741272, -0.9942294359207153, 0.9966515898704529, 0.9777061939239502, 0.9663264751434326, -0.9977492094039917, -0.9996732473373413, 0.9999483227729797, -0.9999988675117493, -0.9979292154312134, 0.9999995231628418, -0.9752395749092102, 0.922939658164978, 0.9681260585784912, -0.9128158092498779, -0.9994750022888184, -0.9997990131378174, 0.917654275894165, 0.9677172303199768, -0.9653894305229187, -0.8428236842155457, 0.9987689256668091, -0.9331029057502747, 0.9996300935745239, 0.9973886609077454, 0.9643940925598145, 0.9969982504844666, -0.9655174612998962, -0.9999585151672363, 0.965191125869751, 0.9669886231422424, 0.9599414467811584, 0.9999733567237854, 0.9693759679794312, 0.9535602331161499, 0.9827015399932861, 0.9561278820037842, 0.9657979011535645, -0.9997995495796204, -0.9655174016952515, -0.9558514356613159, 0.9520189762115479, -0.9576465487480164, -0.9739927053451538, 0.9942493438720703, -0.9999437928199768, 0.9995939135551453, 0.9995091557502747, 0.9656971096992493, -0.9655343890190125, 0.9999252557754517, 0.9943885207176208, 0.9999865889549255, -0.9767569899559021, 0.9655064940452576, 0.9999851584434509, 0.9700678586959839, -0.9988107085227966, -0.9514037370681763, -0.9999961853027344, 0.958463728427887, 0.9865739941596985, 0.8932945728302002, 0.9999999403953552, 0.9678045511245728, 0.9656780362129211, 0.9585627317428589, 0.9654185771942139, -0.9651579260826111, -0.9978225231170654, 0.965463399887085, -0.9797477722167969, -0.9656004309654236, -0.9550648331642151, -0.9615163207054138, 0.9944142699241638, -0.9655593633651733, -0.9863012433052063, 0.8636111617088318, 0.9887285828590393]
    rnn_feature_2 =  [0.9958351254463196, 0.9999920725822449, 0.9943109154701233, -0.9985464215278625, -0.9973777532577515, 0.9999567866325378, 0.9958251118659973, 0.9876009225845337, -0.9994816184043884, -0.9910239577293396, 0.9949482679367065, -0.9999980926513672, -0.9999908804893494, 0.9994378089904785, 0.9657203555107117, 0.9999774098396301, -0.9924333691596985, -0.9999697804450989, -0.9962242245674133, 0.9999998807907104, 0.9999997615814209, -0.9969027638435364, 0.9999698996543884, 0.9999940991401672, -0.999999463558197, 0.9914127588272095, -0.9996015429496765, -0.9995436072349548, 0.97539883852005, 0.9999948143959045, 0.9991713166236877, -0.9886994957923889, -0.9960036873817444, 0.9949798583984375, 0.9921016693115234, -0.9937861561775208, 0.9988359212875366, -0.9999998807907104, -0.9957963228225708, -0.9823622107505798, -0.9970131516456604, 0.9999969005584717, 0.9988913536071777, -0.9997882843017578, -0.9999831318855286, -0.9045405983924866, -0.9999998807907104, -0.9996019005775452, -0.9999549984931946, -0.9945865869522095, -0.9996899962425232, -0.9977795481681824, 0.9999521970748901, 0.9999998807907104, 0.9997276067733765, -0.692555844783783, 0.995883047580719, 0.9771542549133301, -0.9996950626373291, 0.9985164403915405, -0.998382568359375, -0.9999997615814209, -0.9999960064888, 0.9999998807907104, -0.9999978542327881, -0.9999244809150696, -0.9996898770332336, 0.9799578189849854, -0.999114990234375, -0.9996201395988464, -0.9984035491943359, 0.999308168888092, 0.9999555349349976, 0.9999228715896606, -0.9461102485656738, -0.9999930262565613, -0.9784282445907593, -0.9999990463256836, -0.9999871253967285, -0.9881334900856018, -0.9861818552017212, -0.9999997019767761, 0.9999986886978149, 0.9988118410110474, -0.9999892711639404, 0.9999998807907104, 0.9997080564498901, 0.9948789477348328, 0.9999954700469971, -0.9999997615814209, 0.9999998807907104, 0.9999998807907104, -0.917002260684967, -0.9996073842048645]
    rnn_feature_3 =  [0.929304838180542, -0.9705339074134827, 0.9548043012619019, 0.9257931709289551, 0.9958152174949646, -0.9995390772819519, -0.9857621788978577, -0.9858974814414978, 0.9543991088867188, 0.9999624490737915, 0.990217387676239, 0.999870240688324, 0.9999515414237976, -0.9939287304878235, -0.9997881054878235, -0.9936801791191101, -0.977760910987854, 0.9972586035728455, 1.0, -0.9999999403953552, -0.9999999403953552, 0.9999993443489075, -0.9999997019767761, -0.9981520771980286, 0.994085431098938, -0.9613664746284485, 0.9221969842910767, -0.9750844836235046, 0.9994914531707764, -0.9958109855651855, -0.9939032793045044, 0.9942757487297058, 0.9563018083572388, -0.9999991059303284, 0.995768666267395, 0.9596813917160034, 0.999951958656311, 0.9999999403953552, -0.991743266582489, 0.9800897836685181, -0.8371233940124512, -0.9999257922172546, -0.9992260932922363, 0.9999868273735046, 0.9999557137489319, 0.9107763171195984, 0.9999999403953552, 0.997278094291687, -0.9970739483833313, -0.9556638598442078, -0.9633026123046875, 0.9999330639839172, -0.9771517515182495, -0.9751230478286743, -0.9946858286857605, 0.9999380707740784, -0.9999024271965027, 0.8732730746269226, 0.9983199238777161, -0.9999507069587708, 0.9929742217063904, 0.8123788833618164, 0.9945662617683411, -0.9999999403953552, 0.9999997615814209, 0.9999983906745911, 0.9869656562805176, -0.9268302917480469, 0.9999990463256836, 0.9997593760490417, -0.98912513256073, -0.9996225833892822, 0.9998481273651123, -0.9998718500137329, 0.9972803592681885, 0.9999980330467224, 0.9946923851966858, 0.9999902844429016, 0.9754811525344849, 0.9999573826789856, 0.9994619488716125, 0.9924792051315308, -0.9999999403953552, -0.9978642463684082, 0.9987314343452454, -0.9999993443489075, -0.9999983906745911, -0.9995915293693542, -0.9999723434448242, 0.9999999403953552, -0.9999970197677612, -0.9448661804199219, -0.8927628397941589, 0.90985506772995]

    rnn_feature_0 = np.array(rnn_feature_0)
    rnn_feature_1 = np.array(rnn_feature_1)
    rnn_feature_2 = np.array(rnn_feature_2)
    rnn_feature_3 = np.array(rnn_feature_3)

    # Outer loop: run one lifetime, analyse it, reset, repeat forever.
    while True:

        skip_analyse = False
        step_count = 0
        render_id = 0
        # Pixel-space coordinates: 20 px per grid cell, +10 to hit the cell
        # center — presumably matching GE.render's scale; TODO confirm.
        start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10

        init_x, init_y, goal_x, goal_y = int(GE.batched_states[render_id][0]), int(GE.batched_states[render_id][1]), int(GE.batched_goals[render_id][0]), int(GE.batched_goals[render_id][1])
        lifetime_trajectory = []
        step_counts = []

        batched_actions = jnp.array([0])

        # One "lifetime" of agent-environment interaction.
        for t in range(rpl_config.life_duration):

            # Advance the PRNG key each step (old key is discarded).
            _, k2 = jax.random.split(k1)
            k1 = k2

            step_count += 1

            # Forward the model, pick greedy actions, step the environment.
            rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
            batched_actions = get_action_vmap(y1)

            batched_goal_reached, concat_obs = GE.step(batched_actions)

            # Euclidean distance of the current hidden state (restricted to
            # the low-variance units) to each recorded feature vector.
            static_feature = rnn_state[0, var_clt_mean_less_than_0_1]
            sim_to_feature0 = np.linalg.norm(static_feature-rnn_feature_0)
            sim_to_feature1 = np.linalg.norm(static_feature-rnn_feature_1)
            sim_to_feature2 = np.linalg.norm(static_feature-rnn_feature_2)
            sim_to_feature3 = np.linalg.norm(static_feature-rnn_feature_3)

            # print("reward_perturbation: ", reward_perturbation)

            # update result holders
            rnn_state_waterfall.append(rnn_state[0])
            # reward_perturbation_waterfall.append(reward_perturbation[0])

            trajectory.append([20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10])
            lifetime_trajectory.append([int(GE.batched_states[render_id][0]), int(GE.batched_states[render_id][1])])

            # On goal hit: render, draw the trajectory, handle keys
            # ('n' = next lifetime, 'q' = quit), and restart the path.
            if batched_goal_reached[0] == True:
                
                print("sim_to_features: ", sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3)

                img = GE.render(env_id = render_id)
                # draw trajectory on img
                if len(trajectory) > 1:
                    for i in range(len(trajectory)-1):
                        cv2.line(img, (int(trajectory[i][1]), int(trajectory[i][0])), (int(trajectory[i+1][1]), int(trajectory[i+1][0])), (0,130,0), 2)
                cv2.imshow("img", img)
                k = cv2.waitKey(1)
                trajectory.clear()
                trajectory.append([start_x, start_y])
                print(t, "step_count: ", step_count)
                step_counts.append(step_count)
                # A 1-step "solve" means start == goal neighborhood; nothing
                # meaningful to analyse afterwards.
                if step_count < 2:
                    skip_analyse = True
                if k == ord('n'):
                    break
                elif k == ord('q'):
                    exit()
                step_count = 0

                # show key frames
                if rpl_config.show_kf == "True":
                    key_frames.append(img)
                    if len(key_frames) > 0:
                        # concat key_frames into one single image
                        big_img = np.zeros((img.shape[0], img.shape[1]*len(key_frames), 3), dtype=np.uint8)
                        for j in range(len(key_frames)):
                            big_img[:, j*img.shape[1]:(j+1)*img.shape[1], :] = key_frames[j]
                        cv2.imshow("key frames", big_img)
                        print("--------------- Press any key to continue...")
                        cv2.waitKey(0)

        """ visualize neural trajectory
        """
        if not skip_analyse and len(rnn_state_waterfall) >= 2:

            # draw rnn_state_waterfall into a color map of size (rnn_state[0].shape[0], life_time)
            # Shift activations from [-1, 1] into [0, 255] (normalized by the
            # observed max, not a fixed 2.0) and render as a colormapped image.
            rnn_state_waterfall_np = np.array(rnn_state_waterfall)
            rnn_state_waterfall_np = rnn_state_waterfall_np + 1.0
            rnn_state_waterfall_np = rnn_state_waterfall_np / np.max(rnn_state_waterfall_np)
            rnn_state_waterfall_np = rnn_state_waterfall_np * 255
            rnn_state_waterfall_np = rnn_state_waterfall_np.astype(np.uint8)
            rnn_state_waterfall_np = np.transpose(rnn_state_waterfall_np, (1, 0))
            rnn_state_waterfall_np = cv2.applyColorMap(rnn_state_waterfall_np, cv2.COLORMAP_VIRIDIS)
            cv2.imshow("rnn_state_waterfall", rnn_state_waterfall_np)
            cv2.waitKey(0)

            # compute the variance of the last 10 elements of rnn_state_waterfall
            rnn_state_waterfall_np = np.array(rnn_state_waterfall)
            rnn_state_waterfall_np = rnn_state_waterfall_np[-10:]
            rnn_state_waterfall_np = np.var(rnn_state_waterfall_np, axis=0)
            # visualize the variance of the last 10 elements of rnn_state_waterfall
            # print rnn_state_waterfall_np
            print("intra-task variance: ", rnn_state_waterfall_np)
            plt.plot(rnn_state_waterfall_np)
            plt.title("intra-task variance")
            plt.show()

            # if rpl_config.view_3d == "True":

            #     # orthogonality = []
            #     # for i in range(len(rnn_state_waterfall)-1):
            #     #     delta_state = np.array(rnn_state_waterfall[i+1]) - np.array(rnn_state_waterfall[i])
            #     #     # compute normalized delta_state and reward_signature
            #     #     delta_state = delta_state / np.linalg.norm(delta_state)
            #     #     reward_signature_norm = reward_signature / np.linalg.norm(reward_signature)
            #     #     orthogonality.append(np.dot(reward_signature_norm,delta_state))

            #     # print("orthogonality: ", np.linalg.norm(reward_signature))
            #     # print("mean orthogonality: ", np.mean(np.abs(np.array(orthogonality))))
            #     # print("=======================")

            #     time_steps = [0]
            #     for i in range(0, len(step_counts)):
            #         time_steps.append(step_counts[i]+time_steps[i])

            #     print(time_steps)

            #     orthogonality_record = []

            #     for i in range(1, len(time_steps)-2):
            #         reward_perturbation = reward_perturbation_waterfall[time_steps[i]-1]
            #         reward_perturbation_norm = reward_perturbation / np.linalg.norm(reward_perturbation)

            #         print(np.linalg.norm(reward_perturbation))
                    
            #         orthogonality = []
            #         for t in range(time_steps[i-1], time_steps[i]-2):
            #             delta_state = np.array(rnn_state_waterfall[t+1]) - np.array(rnn_state_waterfall[t])
            #             delta_state_norm = delta_state / np.linalg.norm(delta_state)
            #             orthogonality.append(np.dot(reward_perturbation_norm,delta_state_norm))
                    
            #         # print(orthogonality)
            #         print("step range : ", time_steps[i-1], time_steps[i]-1)
            #         print("mean orthogonality: ", np.mean(np.abs(np.array(orthogonality))))
            #         orthogonality_record.append(np.mean(np.abs(np.array(orthogonality))))

            #     # plot orthogonality_record
            #     plt.plot(orthogonality_record)
            #     plt.show()

            #     # print(step_counts)
            #     # print("shape of rnn_state_waterfall_np: ", np.array(rnn_state_waterfall).shape)
            #     # # do pca analysis on rnn_state_waterfall_np
            #     # xt = pipe.fit_transform(np.array(rnn_state_waterfall))
            #     # print("shape of xt: ", xt.shape)
            #     # ax = plt.axes(projection='3d')
            #     # n = len(rnn_state_waterfall)-1
            #     # for i in range(0,n):
            #     #     ax.plot([xt[i, 0], xt[i+1, 0]], [xt[i, 1], xt[i+1, 1]], [xt[i, 2], xt[i+1, 2]], color=colorFader(c1,c2,i/n), linewidth = 1)
            #     # # draw step_counts as dots
            #     # k = 0
            #     # for i in range(len(step_counts)-1):
            #     #     k += step_counts[i] - 1
            #     #     ax.scatter(xt[k, 0], xt[k, 1], xt[k, 2], color='green', s=20)
            #     # plt.show()
            # else:
            #     # save rnn_state_waterfall to json file
            #     pth = "./logs/full_report.json"
            #     save_everything(rnn_state_waterfall, landscape, init_x, init_y, goal_x, goal_y, lifetime_trajectory, pth)
            #     print("full_report saved to: ", pth)


        """ reset environment and task
        """
        # Restore the same task (state/goal from file) and a fresh hidden
        # state for the next lifetime; key_frames/waterfall are cleared but
        # `trajectory` is re-seeded with the start position.
        print("reset")
        key_frames.clear()
        rnn_state_waterfall.clear()
        rnn_state = model.initial_state(GE.num_envs)
        # rnn_state = model.initial_state_rnd(GE.num_envs, k1_new)

        GE.reset()
        # set states of GE
        GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
        GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
        # set goals of GE
        GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
        GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
        concat_obs = GE.concat_obs

        trajectory.clear()
        start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10
        trajectory.append([start_x, start_y])
        step_count = 0

if __name__ == "__main__":
    main()