from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *

from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt
import matplotlib as mpl

import json
from replay_config import *
import argparse

def colorFader(c1,c2,mix=0):
    """Linearly interpolate between two matplotlib colors.

    Returns a hex color string: c1 at mix=0, c2 at mix=1.
    """
    rgb_start = np.array(mpl.colors.to_rgb(c1))
    rgb_end = np.array(mpl.colors.to_rgb(c2))
    blended = rgb_start * (1 - mix) + rgb_end * mix
    return mpl.colors.to_hex(blended)

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model, reward_sig=None):
    """ forward pass of the model

    Args:
        variables: pytree of model parameters, forwarded to model.apply.
        state: recurrent hidden state carried between steps.
        x: observation batch fed to the network.
        model: the network module; marked static for jit, so each distinct
            model object triggers its own compilation.
        reward_sig: optional reward-signature vector forwarded to the model
            (presumably modulates the hidden state on reward — confirm in
            the model definition).

    Returns:
        Whatever model.apply returns; callers in this file unpack it as
        (new_state, output, reward_perturbation).
    """
    return model.apply(variables, state, x, reward_sig=reward_sig)

@jit
def get_action(y):
    """Greedy action selection: index of the largest entry of y."""
    return y.argmax()
# batched variant: one greedy action per row of the input
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json"):
    """Read a task description from a JSON file.

    The file must contain the keys "data" (landscape), "state" (start
    position) and "goal". Prints the three fields for quick inspection.

    Returns:
        (landscape, state, goal) as plain Python lists.
    """
    with open(pth, "r") as f:
        task = json.load(f)
    landscape, state, goal = task["data"], task["state"], task["goal"]
    print("state: ", state)
    print("goal: ", goal)
    print("landscape: ", landscape)
    return landscape, state, goal

# NOTE: an exact byte-for-byte duplicate of colorFader() was defined here,
# silently re-binding the name to an identical implementation of the function
# already defined near the top of this file. The duplicate has been removed;
# callers keep using the original definition.

def save_rnn_states(states, pth = "./logs/states.json"):
    """Dump a sequence of RNN hidden states to a JSON file under key "data"."""
    payload = {"data": np.array(states).tolist()}
    with open(pth, "w") as f:
        json.dump(payload, f)

def save_everything(states, landscape, start_x, start_y, goal_x, goal_y, trajectory, pth = "./logs/full_report.json"):
    """Write a full episode report (task layout, RNN states, trajectory) to JSON.

    The landscape is wrapped in a one-element list to match the batched
    task format used elsewhere in this project.
    """
    report = {
        "landscape": [landscape],
        "state": [start_x, start_y],
        "goal": [goal_x, goal_y],
        "data": np.array(states).tolist(),
        "trajectory": trajectory,
    }
    with open(pth, "w") as f:
        json.dump(report, f)

# endpoint colors for colorFader time-gradient plots
# (original inline comments said "blue"/"green", which contradicted the values)
c1='red' #gradient start color (mix=0)
c2='blue' #gradient end color (mix=1)

def main(i_task = 0):

    """Replay one saved task environment with the trained RNN agent.

    Loads model weights and task ``i_task`` from
    ./data/adaptive_trajectory_optimization/task_envs/, runs the agent for
    ``life_duration`` steps per lifetime, records hidden states and reward
    perturbations, and renders the trajectory whenever the goal is reached.

    NOTE(review): an unconditional ``return`` after the first lifetime makes
    the neural-trajectory visualization / PCA analysis and the env-reset code
    below it unreachable — presumably a debugging toggle; confirm intent.
    """
    # parse arguments; CLI defaults come from ReplayConfig
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--show_trj", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--view_3d", type=str, default=rpl_config.view_3d)

    args = parser.parse_args()

    # copy parsed values back onto the config object
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.show_trj
    rpl_config.life_duration = args.life_duration
    rpl_config.view_3d = args.view_3d

    # NOTE(review): this overrides the --task_pth CLI argument parsed above,
    # making that flag effectively dead for this script.
    rpl_config.task_pth = "./data/adaptive_trajectory_optimization/task_envs/task_"+str(i_task)+".json"

    cv2.namedWindow("rnn_state_waterfall", 0)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)
    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)
    
    # NOTE(review): leaf order follows jax.tree_util flattening; leaf 1 is
    # presumably the input-to-hidden weight matrix — confirm against the
    # printed shapes above. leaf_3 is copied but never used afterwards.
    leaf_1 = jnp.copy(tree_leaves[1])
    leaf_3 = jnp.copy(tree_leaves[3])

    # row nn_size+4 presumably corresponds to the reward input channel of the
    # concatenated observation — TODO confirm against the model's input layout
    reward_signature = np.array(leaf_1[rpl_config.nn_size+4,:])
    
    print("shape of reward_signature: ", reward_signature.shape)

    # plot reward_signature using bar chart
    # make y axis range from -1 to 1
    # plt.ylim(-2, 2)
    # plt.bar(np.arange(0, rpl_config.nn_size), reward_signature)
    # plt.show()

    # pre-process reward_sig
    # reward_signature = jnp.where(jnp.abs(reward_signature)>=1, 0, reward_signature)
    # keep only large-magnitude entries (|w| > 1); everything else zeroed
    reward_signature_1 = jnp.where(jnp.abs(reward_signature)<=1, 0, reward_signature)

    # reward_signature = 1.1*reward_signature

    # plt.ylim(-2, 2)
    # plt.xticks(np.arange(0, rpl_config.nn_size, 1))
    # plt.bar(np.arange(0, rpl_config.nn_size), reward_signature_1)
    # plt.show()
    

    if rpl_config.nn_type == "vanilla":
        # get elements of params
        tree_leaves = jax.tree_util.tree_leaves(params)
        for i in range(len(tree_leaves)):
            print("shape of leaf ", i, ": ", tree_leaves[i].shape)
        
        # print("params ", params)
        
        # connection matrix of the vanilla rnn
        # NOTE(review): these four copies are printed but otherwise unused here
        leaf_1 = jnp.copy(tree_leaves[0])       # bias from input to hidden
        leaf_2 = jnp.copy(tree_leaves[1])       # weights from input to hidden
        leaf_3 = jnp.copy(tree_leaves[2])       # bias from hidden to action
        leaf_4 = jnp.copy(tree_leaves[3])       # weights from hidden to action
        leaf_1 = np.array(leaf_1)
        leaf_2 = np.array(leaf_2)
        leaf_3 = np.array(leaf_3)
        leaf_4 = np.array(leaf_4)

        print("leaf_1: ", leaf_1.shape)
        print("leaf_2: ", leaf_2.shape)
        print("leaf_3: ", leaf_3.shape)
        print("leaf_4: ", leaf_4.shape)


    """ load task
    """
    landscape, state, goal = load_task(pth=rpl_config.task_pth)

    """ create agent
    """
    # choose the network class by configured type; no fallback for other values
    if rpl_config.nn_type == "vanilla":
        model = RNN_analyse(hidden_dims = rpl_config.nn_size)
        # model = RNN_th_rs1(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # # check if param fits the agent
    # if rpl_config.nn_type == "vanilla":
    #     assert params["params"]["Dense_0"]["kernel"].shape[0] == model.hidden_dims + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = [landscape], width = 12, height = 12, num_envs_per_landscape = 1)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
    GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
    # set goals of GE
    GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
    GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)

    # fresh PRNG key per run; npr seed is not fixed, so runs are not reproducible
    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))
    concat_obs = GE.concat_obs
    rnn_state = model.initial_state(GE.num_envs)

    # result holders
    trajectory = []
    rnn_state_waterfall = []
    reward_perturbation_waterfall = []
    key_frames = []

    # standardize-then-PCA pipeline used for the 3D trajectory visualization
    pca = PCA()
    pipe = Pipeline([('scaler', StandardScaler()), ('pca', pca)])


    rnn_log_pth = "./data/adaptive_trajectory_optimization/rnn_state_opt_log_128.npy"
    trj_log_pth = "./data/adaptive_trajectory_optimization/trj_log_128.json"
    # load rnn_state_opt_log.npy into a numpy array if rnn_state_opt_log.npy exists
    if os.path.exists(rnn_log_pth):
        rnn_state_opt_log = np.load(rnn_log_pth)
    else:
        rnn_state_opt_log = np.zeros_like(rnn_state)

    # load trj_log.json into a numpy array if trj_log.json exists
    if os.path.exists(trj_log_pth):
        with open(trj_log_pth, "r") as f:
            trj_json = json.load(f)
            trj_log = trj_json["data"]
    else:
        trj_log = []

    current_trj = []

    # NOTE(review): hard-coded hidden-unit indices and reference feature
    # vectors below presumably come from a prior offline analysis run of the
    # 128-unit model — confirm provenance before reusing with other weights.
    var_clt_mean_less_than_0_1 = [1, 5, 6, 11, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 27, 28, 34, 44, 46, 57, 62, 65, 68, 80, 82, 83, 88, 89, 96, 98, 100, 102, 104, 108, 113, 116, 120, 122]
    # 1, -1
    rnn_feature_0 =  [0.9999582767486572, -0.9831897020339966, 0.9996467232704163, -0.9994123578071594, -0.999851405620575, -0.9999927878379822, 0.999983549118042, 0.9612636566162109, 0.9998806715011597, 0.9932894706726074, -0.9997431635856628, 0.9999991059303284, 0.9999998807907104, -0.9999634027481079, 0.9999784231185913, 0.9996755123138428, -0.9992828369140625, 0.8952787518501282, -0.9999989867210388, 0.9748570919036865, 0.9935382008552551, -0.9999995827674866, 0.9871329665184021, 0.9740328788757324, -0.9953449368476868, 0.9701042771339417, -0.9991737604141235, -0.9999988675117493, -0.9990499019622803, -0.999997079372406, 0.9949736595153809, 0.9822999238967896, -0.9999629855155945, -0.9999940991401672, -0.9986229538917542, 0.9996981024742126, 0.9999974370002747, 0.9871447682380676, -0.9999998807907104]
    # 1, 1
    rnn_feature_1 =  [-0.9998412728309631, 0.9671010375022888, -0.9999982714653015, 0.9992090463638306, 0.9994693994522095, 0.9999602437019348, -0.9925903081893921, -0.9984927773475647, -0.9941282868385315, 0.9965940713882446, 0.9773151278495789, -0.9977096915245056, -0.9996675252914429, 0.9999555349349976, -0.9999988675117493, -0.9978929758071899, 0.9999995231628418, -0.9997955560684204, 0.9996235966682434, 0.9973439574241638, -0.9999579787254333, 0.9999728798866272, 0.9823980927467346, -0.9997960329055786, 0.9941484332084656, -0.9999427795410156, 0.9995871186256409, 0.9999239444732666, 0.9942917823791504, 0.9999849796295166, -0.9987909197807312, -0.9999961256980896, 0.9863384366035461, 0.9999999403953552, 0.9593451619148254, -0.9977843165397644, -0.9793924689292908, -0.9543303847312927, 0.9943166375160217]
    # -1, -1
    rnn_feature_2 =  [0.9999920725822449, -0.9973777532577515, 0.9999567866325378, -0.9910239577293396, -0.9999980926513672, -0.9999908804893494, 0.9994378089904785, 0.9657203555107117, 0.9999774098396301, -0.9924333691596985, -0.9999697804450989, 0.9999998807907104, 0.9999997615814209, -0.9969027638435364, 0.9999698996543884, 0.9999940991401672, -0.999999463558197, 0.9991713166236877, -0.9999998807907104, -0.9957963228225708, 0.9988913536071777, -0.9999998807907104, -0.9945865869522095, 0.9999521970748901, -0.9996950626373291, 0.9985164403915405, -0.998382568359375, -0.9999978542327881, -0.9999244809150696, -0.9996201395988464, 0.999308168888092, 0.9999228715896606, -0.9999930262565613, -0.9999990463256836, -0.9861818552017212, 0.9988118410110474, 0.9999998807907104, 0.9948789477348328, -0.9999997615814209]
    # -1, 1
    rnn_feature_3 =  [-0.9705339074134827, 0.9958152174949646, -0.9995390772819519, 0.9999624490737915, 0.999870240688324, 0.9999515414237976, -0.9939287304878235, -0.9997881054878235, -0.9936801791191101, -0.977760910987854, 0.9972586035728455, -0.9999999403953552, -0.9999999403953552, 0.9999993443489075, -0.9999997019767761, -0.9981520771980286, 0.994085431098938, -0.9939032793045044, 0.9999999403953552, -0.991743266582489, -0.9992260932922363, 0.9999999403953552, -0.9556638598442078, -0.9771517515182495, 0.9983199238777161, -0.9999507069587708, 0.9929742217063904, 0.9999997615814209, 0.9999983906745911, 0.9997593760490417, -0.9996225833892822, -0.9998718500137329, 0.9999980330467224, 0.9999902844429016, 0.9994619488716125, -0.9978642463684082, -0.9999993443489075, -0.9995915293693542, 0.9999999403953552]


    # one iteration of this loop = one agent "lifetime"
    # (with the early `return` below, only the first iteration ever runs)
    while True:

        skip_analyse = False
        step_count = 0
        render_id = 0
        # pixel coordinates: grid cells are 20px, +10 centers within the cell
        start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10
        start_x, start_y = int(start_x), int(start_y)

        init_x, init_y, goal_x, goal_y = int(GE.batched_states[render_id][0]), int(GE.batched_states[render_id][1]), int(GE.batched_goals[render_id][0]), int(GE.batched_goals[render_id][1])
        lifetime_trajectory = []
        step_counts = []

        batched_actions = jnp.array([0])

        # NOTE(review): reset_cmd is set False then immediately True — the
        # False assignment is dead; env resets on every step as coded.
        reset_counter = 50
        reset_event = False
        reset_cmd = False
        reset_cmd = True

        rnn_state_goal = None

        for t in range(rpl_config.life_duration):

            _, k2 = jax.random.split(k1)
            k1 = k2

            step_count += 1

            # if batched_goal_reached[0] == True and reset_event == False:
            #     reset_event = True
            # if reset_event == True:
            #     reset_counter -= 1
            # if reset_counter <= 0:
            #     reset_cmd = True
            #     reset_counter = 10
            #     reset_event = False

            # reset_cmd = False
            
            # one RNN step: new hidden state, action logits, reward perturbation
            rnn_state, y1, reward_perturbation = model_forward(params, rnn_state, concat_obs, model, reward_signature)
            batched_actions = get_action_vmap(y1)

            batched_goal_reached, concat_obs = GE.step(batched_actions, reset=reset_cmd)

            # compare the selected (low-variance) hidden units against the four
            # stored reference feature vectors via dot products
            static_feature = rnn_state[0, var_clt_mean_less_than_0_1]
            sim_to_feature0 = np.dot(static_feature, rnn_feature_0)
            sim_to_feature1 = np.dot(static_feature, rnn_feature_1)
            sim_to_feature2 = np.dot(static_feature, rnn_feature_2)
            sim_to_feature3 = np.dot(static_feature, rnn_feature_3)

            # print("reward_perturbation: ", reward_perturbation)

            # update result holders
            rnn_state_waterfall.append(rnn_state[0])
            reward_perturbation_waterfall.append(reward_perturbation[0])

            trajectory.append([int(20 * GE.batched_states[render_id][0]+10), int(20 * GE.batched_states[render_id][1]+10)])
            lifetime_trajectory.append([int(GE.batched_states[render_id][0]), int(GE.batched_states[render_id][1])])

            # goal reached: render the trial, record stats, poll keyboard
            # ('n' = next lifetime, 'q' = quit)
            if batched_goal_reached[0] == True:

                print("sim_to_features: ", sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3)

                rnn_state_goal = np.copy(rnn_state)
                current_trj = copy.deepcopy(trajectory)
                img = GE.render(env_id = render_id)
                # draw trajectory on img
                if len(trajectory) > 1:
                    for i in range(len(trajectory)-1):
                        cv2.line(img, (int(trajectory[i][1]), int(trajectory[i][0])), (int(trajectory[i+1][1]), int(trajectory[i+1][0])), (0,130,0), 2)
                cv2.imshow("img", img)
                k = cv2.waitKey(1)
                trajectory.clear()
                trajectory.append([start_x, start_y])
                print(t, "step_count: ", step_count)
                step_counts.append(step_count)
                if step_count < 2:
                    skip_analyse = True
                if k == ord('n'):
                    break
                elif k == ord('q'):
                    exit()
                step_count = 0

                # show key frames
                if rpl_config.show_kf == "True":
                    key_frames.append(img)
                    if len(key_frames) > 0:
                        # concat key_frames into one single image
                        big_img = np.zeros((img.shape[0], img.shape[1]*len(key_frames), 3), dtype=np.uint8)
                        for j in range(len(key_frames)):
                            big_img[:, j*img.shape[1]:(j+1)*img.shape[1], :] = key_frames[j]
                        cv2.imshow("key frames", big_img)
                        print("--------------- Press any key to continue...")
                        cv2.waitKey(0)


        # compare hidden state to reward_signature_1
        h_opt = jnp.where(reward_signature_1!=0, rnn_state_waterfall[-1], 0)
        # # normalize h_opt
        # h_opt = h_opt / np.linalg.norm(h_opt)
        # sim_hopt_rsig1 = jnp.dot(h_opt, reward_signature_1 / np.linalg.norm(reward_signature_1))
        # print("sim_hopt_rsig1: ", sim_hopt_rsig1)
        # plot h_opt and reward_signature_1
        # plt.plot(h_opt, label="h_opt")
        # plt.plot(reward_signature_1, label="reward_signature_1")
        # plt.legend()
        # plt.show()
        # plt.plot(rnn_state_waterfall[-1], label="rnn_state_opt")
        # plt.show()

        # # update rnn_state_opt_log
        # # rnn_state_opt_log = np.append(rnn_state_opt_log, rnn_state, axis=0)
        # if rnn_state_goal is not None:

        #     rnn_state_opt_log = np.append(rnn_state_opt_log, rnn_state_goal, axis=0)
        #     # save rnn_state_opt_log
        #     np.save(rnn_log_pth, rnn_state_opt_log)

        #     # save trj_log
        #     trj_log.append(current_trj)
        #     pth = trj_log_pth
        #     with open(pth, 'w') as f:
        #         json.dump({"data": trj_log, 
        #             }, f)

        # NOTE(review): unconditional early return — everything below in this
        # loop (waterfall rendering, PCA / orthogonality analysis, full-report
        # saving, env/task reset) is dead code while this line is present.
        return

        """ visualize neural trajectory
        """
        if not skip_analyse and len(rnn_state_waterfall) >= 2:

            # draw rnn_state_waterfall into a color map of size (rnn_state[0].shape[0], life_time)
            rnn_state_waterfall_np = np.array(rnn_state_waterfall)
            rnn_state_waterfall_np = rnn_state_waterfall_np + 1.0
            rnn_state_waterfall_np = rnn_state_waterfall_np / np.max(rnn_state_waterfall_np)
            rnn_state_waterfall_np = rnn_state_waterfall_np * 255
            rnn_state_waterfall_np = rnn_state_waterfall_np.astype(np.uint8)
            rnn_state_waterfall_np = np.transpose(rnn_state_waterfall_np, (1, 0))
            rnn_state_waterfall_np = cv2.applyColorMap(rnn_state_waterfall_np, cv2.COLORMAP_VIRIDIS)
            cv2.imshow("rnn_state_waterfall", rnn_state_waterfall_np)
            cv2.waitKey(0)

            if rpl_config.view_3d == "True":

                # cumulative step indices marking the boundary of each trial
                time_steps = [0]
                for i in range(0, len(step_counts)):
                    time_steps.append(step_counts[i]+time_steps[i])

                print(time_steps)

                orthogonality_record = []
                orthogonality_record_pcs = []
                cross_similarity_record = []
                trajectory_rs_corelation_record = []
                ro_record = []

                # per-trial analysis: how orthogonal is the reward perturbation
                # to the hidden-state trajectory and its principal components?
                for i in range(1, len(time_steps)-2):
                    reward_perturbation = reward_perturbation_waterfall[time_steps[i]-1]
                    reward_perturbation_norm = reward_perturbation / np.linalg.norm(reward_perturbation)

                    print("reward_perturbation norm : ",np.linalg.norm(reward_perturbation))
                    
                    orthogonality = []
                    orthogonality_pcs = []
                    start_state = rnn_state_waterfall[time_steps[i-1]]
                    end_state = rnn_state_waterfall[time_steps[i]-1] - reward_perturbation

                    tr_length = 0
                    trial_trajectory = []
                    for t in range(time_steps[i-1], time_steps[i]-2):
                        delta_state = np.array(rnn_state_waterfall[t+1]) - np.array(rnn_state_waterfall[t])
                        delta_state_norm = delta_state / np.linalg.norm(delta_state)
                        tr_length += np.linalg.norm(delta_state)
                        orthogonality.append(np.dot(reward_perturbation_norm,delta_state_norm))
                        trial_trajectory.append(delta_state_norm)

                    end_delta_state = end_state - rnn_state_waterfall[time_steps[i]-2]
                    end_delta_state_norm = end_delta_state / np.linalg.norm(end_delta_state)
                    trial_trajectory.append(end_delta_state_norm)

                    # do pca analysis on delta_state
                    xt = pipe.fit_transform(np.array(trial_trajectory))
                    pca_ = PCA().fit(np.array(trial_trajectory))
                    # get principal components of the trajectory
                    pc1 = pca_.components_[0]
                    pc2 = pca_.components_[1]
                    pc3 = pca_.components_[2]
                    pc4 = pca_.components_[3]

                    orthogonality_pcs.append(np.dot(reward_perturbation_norm, pc1))
                    orthogonality_pcs.append(np.dot(reward_perturbation_norm, pc2))
                    orthogonality_pcs.append(np.dot(reward_perturbation_norm, pc3))
                    # orthogonality_pcs.append(np.dot(reward_perturbation_norm, pc4))

                    # print("orthogonality to pc1: ", np.dot(reward_perturbation_norm, pc1))
                    # print("orthogonality to pc2: ", np.dot(reward_perturbation_norm, pc2))
                    # print("orthogonality to pc3: ", np.dot(reward_perturbation_norm, pc3))
                    # print("orthogonality to pc4: ", np.dot(reward_perturbation_norm, pc4))

                    # print("angle between reward_perturbation_norm and pc1 in degree: ", np.arccos(np.dot(reward_perturbation_norm, pc1))*180/np.pi)
                    # print("angle between reward_perturbation_norm and pc2 in degree: ", np.arccos(np.dot(reward_perturbation_norm, pc2))*180/np.pi)
                    # print("angle between reward_perturbation_norm and pc3 in degree: ", np.arccos(np.dot(reward_perturbation_norm, pc3))*180/np.pi)
                    # print("angle between reward_perturbation_norm and pc4 in degree: ", np.arccos(np.dot(reward_perturbation_norm, pc4))*180/np.pi)

                    # # print the top 6 variance ratio of pca
                    # print("pca variance ratio: ", pca_.explained_variance_ratio_[:6])

                    # # visualize trajectory in 3d space
                    # ax = plt.axes(projection='3d')
                    # n = len(trial_trajectory)-1
                    # for j in range(0,n):
                    #     ax.plot([xt[j, 0], xt[j+1, 0]], [xt[j, 1], xt[j+1, 1]], [xt[j, 2], xt[j+1, 2]], color=colorFader(c1,c2,j/n), linewidth = 1)
                    # print("--------------- Close window to continue...")
                    # plt.show()

                    offset = end_state - start_state

                    trajectory_rs_corelation = np.dot(start_state/np.linalg.norm(start_state), reward_perturbation_norm)
                    trajectory_rs_corelation_record.append(trajectory_rs_corelation)

                    print("trajectory_rs_corelation: ", trajectory_rs_corelation)
                    print("------------------- tr_length: ", tr_length)

                    # print("offset - reward_perturbation: ", np.linalg.norm(offset + reward_perturbation))
                    # print("offset : ", np.linalg.norm(offset))
                    print("orthogonality bt reward_perturbation and offset: ", np.abs(np.dot(reward_perturbation_norm, offset/np.linalg.norm(offset))))
                    ro_record.append(np.abs(np.dot(reward_perturbation_norm, offset/np.linalg.norm(offset))))
                    
                    # print(orthogonality)
                    print("step range : ", time_steps[i-1], time_steps[i]-1)
                    print("mean orthogonality: ", np.mean(np.abs(np.array(orthogonality))))
                    print("max orthogonality: ", np.max(np.abs(np.array(orthogonality))), "on step: ", np.argmax(np.abs(np.array(orthogonality))))
                    orthogonality_record.append(np.mean(np.abs(np.array(orthogonality))))
                    orthogonality_record_pcs.append(np.mean(np.abs(np.array(orthogonality_pcs))))

                # print ro_record with ',' as separator
                # print(','.join(map(str, orthogonality_record)))

                print("mean orthogonality_record_pcs: ", np.mean(np.array(orthogonality_record_pcs)))
                    
                # plot orthogonality_record
                # set y axis to be 0-1
                plt.ylim(0, 1)
                plt.plot(orthogonality_record)
                plt.show()
                # plt.plot(ro_record)
                # plt.show()
                plt.plot(orthogonality_record_pcs)
                plt.show()

                # print(step_counts)
                # print("shape of rnn_state_waterfall_np: ", np.array(rnn_state_waterfall).shape)
                # # do pca analysis on rnn_state_waterfall_np
                # xt = pipe.fit_transform(np.array(rnn_state_waterfall))
                # print("shape of xt: ", xt.shape)
                # ax = plt.axes(projection='3d')
                # n = len(rnn_state_waterfall)-1
                # for i in range(0,n):
                #     ax.plot([xt[i, 0], xt[i+1, 0]], [xt[i, 1], xt[i+1, 1]], [xt[i, 2], xt[i+1, 2]], color=colorFader(c1,c2,i/n), linewidth = 1)
                # # draw step_counts as dots
                # k = 0
                # for i in range(len(step_counts)-1):
                #     k += step_counts[i] - 1
                #     ax.scatter(xt[k, 0], xt[k, 1], xt[k, 2], color='green', s=20)
                # plt.show()
            else:
                # save rnn_state_waterfall to json file
                pth = "./logs/full_report.json"
                save_everything(rnn_state_waterfall, landscape, init_x, init_y, goal_x, goal_y, lifetime_trajectory, pth)
                print("full_report saved to: ", pth)


        """ reset environment and task
        """
        print("reset")
        key_frames.clear()
        rnn_state_waterfall.clear()
        rnn_state = model.initial_state(GE.num_envs)
        # rnn_state = model.initial_state_rnd(GE.num_envs, k1_new)

        GE.reset()
        # set states of GE
        GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
        GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
        # set goals of GE
        GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
        GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
        concat_obs = GE.concat_obs

        trajectory.clear()
        start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10
        trajectory.append([start_x, start_y])
        step_count = 0

if __name__ == "__main__":

    # count the files in ./data/adaptive_trajectory_optimization/task_envs/
    f_num = len([name for name in os.listdir('./data/adaptive_trajectory_optimization/task_envs/') if os.path.isfile(os.path.join('./data/adaptive_trajectory_optimization/task_envs/', name))])
    print("f_num: ", f_num)
    # NOTE(review): the computed file count is immediately discarded by this
    # hard-coded override — presumably a debugging leftover; confirm intent.
    f_num = 251
    # replay/analyse each task environment in turn
    for i in range(f_num):
        main(i)