from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *

from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt

import json
from replay_config import *
import argparse

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Run one jitted forward pass of ``model``.

    Args:
        variables: parameter pytree handed to ``model.apply``.
        state: recurrent state pytree.
        x: batched observation input.
        model: module providing ``apply``; declared static for jit, so it
            must be hashable and is baked into the compiled function.

    Returns:
        The result of ``model.apply(variables, state, x)`` (at the call
        site this is unpacked as ``(new_state, output)``).
    """
    result = model.apply(variables, state, x)
    return result

@jit
def get_action(y):
    """Greedy policy: return the index of the largest entry of ``y``."""
    return y.argmax()
# batched variant: maps get_action over the leading (env) axis
get_action_vmap = jax.vmap(get_action)

# @partial(jax.jit, static_argnums=(1,))
# def get_action(y, action_threshold: float = 0.2):
#     """ get deterministic action from the model output
#     """
#     arg_max = jnp.argmax(y)
#     arg_max = jnp.where(jnp.max(y) >= action_threshold, arg_max, 4)
#     return arg_max

# """ auto batched get_action
# """
# get_action_vmap = jax.vmap(get_action, in_axes=(0, None))

# save current landscape as json file
def save_current_task(landscape, start_x, start_y, goal_x, goal_y, pth = "./logs/landscape.json"):
    """Persist the first landscape of the pool plus start/goal cells as JSON.

    Args:
        landscape: sequence of 1-D array-likes; only ``landscape[0]`` is
            serialized (the rendered/analysed environment).
        start_x, start_y: agent start cell, written under the "state" key.
        goal_x, goal_y: goal cell, written under the "goal" key.
        pth: output file path.
    """
    # convert to plain Python ints so the array is JSON-serializable
    cells = [int(v) for v in landscape[0]]

    with open(pth, "w") as f:
        json.dump({"data": cells,
                   "state": [start_x, start_y],
                   "goal": [goal_x, goal_y]}, f)

def main() -> None:

    """Interactive replay/analysis loop.

    Loads trained weights, builds a recurrent agent (vanilla RNN or GRU),
    then repeatedly: runs the agent on a freshly generated maze, draws the
    walked trajectory on the rendered grid (OpenCV), visualizes the hidden
    state over time as a colormap "waterfall" plus a 3-D PCA projection,
    saves the finished task to JSON, and resets with a new maze.

    OpenCV window keys: 'n' = skip to a new maze, 'q' = quit.
    """
    rpl_config = ReplayConfig()

    # CLI flags default to the values baked into ReplayConfig
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    # NOTE(review): "--show_trj" is backed by rpl_config.video_output —
    # the flag name and the config field disagree; confirm intended.
    parser.add_argument("--show_trj", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # write parsed values back so the rest of the function reads only rpl_config
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.show_trj
    rpl_config.life_duration = args.life_duration

    """ load model
    """
    params = load_weights(rpl_config.model_pth)  # parameter pytree from checkpoint

    """ create landscape
    """
    # one 10x10 maze, padded out to 12x12 (presumably adds a border
    # wall — see padding_landscapes)
    landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
    landscape = padding_landscapes(landscape, width=12, height=12)

    print("landscape :")
    print(landscape)

    """ create agent
    """
    # NOTE(review): if nn_type is neither "vanilla" nor "gru", `model`
    # is never bound and the code below fails with NameError.
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # (Dense_0 input is hidden state concatenated with a 10-dim
    # observation — presumably; verify against the RNN definition)
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == model.hidden_dims + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscape, width = 12, height = 12, num_envs_per_landscape = 1)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))
    concat_obs = GE.concat_obs
    rnn_state = model.initial_state(GE.num_envs)

    # result holders
    trajectory = []           # agent positions in render-pixel coordinates
    rnn_state_waterfall = []  # hidden state of env 0, one entry per step
    key_frames = []           # NOTE(review): cleared each episode but never filled here

    # standardize hidden states, then project; refit for every episode batch
    pca = PCA()
    pipe = Pipeline([('scaler', StandardScaler()), ('pca', pca)])

    while True:

        skip_analyse = False
        step_count = 0
        render_id = 0  # env index used for rendering and analysis
        # grid cell -> pixel center: 20 px per cell, +10 offset
        # (assumed to match GE.render's tile size — TODO confirm)
        start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10

        init_x, init_y, goal_x, goal_y = int(GE.batched_states[render_id][0]), int(GE.batched_states[render_id][1]), int(GE.batched_goals[render_id][0]), int(GE.batched_goals[render_id][1])

        for t in range(rpl_config.life_duration):

            # advance the PRNG key each step (k1 is not otherwise
            # consumed inside this loop)
            _, k2 = jax.random.split(k1)
            k1 = k2

            step_count += 1

            # one recurrent step: new hidden state + policy output
            rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
            batched_actions = get_action_vmap(y1)
            batched_goal_reached, concat_obs = GE.step(batched_actions)

            # update result holders
            rnn_state_waterfall.append(rnn_state[0])
            trajectory.append([20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10])

            if batched_goal_reached[0] == True:
                img = GE.render(env_id = render_id)
                # draw trajectory on img
                # (point components swapped because cv2 expects (x, y))
                if len(trajectory) > 1:
                    for i in range(len(trajectory)-1):
                        cv2.line(img, (int(trajectory[i][1]), int(trajectory[i][0])), (int(trajectory[i+1][1]), int(trajectory[i+1][0])), (0,130,0), 2)
                cv2.imshow("img", img)
                k = cv2.waitKey(1)
                # restart the trajectory from the episode's start cell;
                # the env itself keeps running (repeat-task setting)
                trajectory.clear()
                trajectory.append([start_x, start_y])
                print(t, "step_count: ", step_count)
                # very short runs yield too few hidden states to analyse
                if step_count <= 3:
                    skip_analyse = True
                if k == ord('n'):
                    break
                elif k == ord('q'):
                    exit()
                step_count = 0

        """ visualize neural trajectory
        """
        if not skip_analyse and len(rnn_state_waterfall) > 3:

            # draw rnn_state_waterfall into a color map of size (rnn_state[0].shape[0], life_time)
            rnn_state_waterfall_np = np.array(rnn_state_waterfall)
            # shift then scale into [0, 255] (assumes activations are
            # >= -1, e.g. tanh units — TODO confirm)
            rnn_state_waterfall_np = rnn_state_waterfall_np + 1.0
            rnn_state_waterfall_np = rnn_state_waterfall_np / np.max(rnn_state_waterfall_np)
            rnn_state_waterfall_np = rnn_state_waterfall_np * 255
            rnn_state_waterfall_np = rnn_state_waterfall_np.astype(np.uint8)
            rnn_state_waterfall_np = np.transpose(rnn_state_waterfall_np, (1, 0))  # -> (units, time)
            rnn_state_waterfall_np = cv2.applyColorMap(rnn_state_waterfall_np, cv2.COLORMAP_VIRIDIS)
            # scale the image vertically by 3
            rnn_state_waterfall_np = cv2.resize(rnn_state_waterfall_np, (rnn_state_waterfall_np.shape[1], rnn_state_waterfall_np.shape[0]*3))
            print("--------------- Press any key to continue...")
            cv2.imshow("rnn_state_waterfall", rnn_state_waterfall_np)
            cv2.waitKey(0)

            print("shape of rnn_state_waterfall_np: ", np.array(rnn_state_waterfall).shape)
            # do pca analysis on rnn_state_waterfall_np
            xt = pipe.fit_transform(np.array(rnn_state_waterfall))
            print("shape of xt: ", xt.shape)
            ax = plt.axes(projection='3d')
            # scatter only the last 80% of steps (skips the initial transient)
            ax.scatter(xt[int(len(xt)/5):-1, 0], xt[int(len(xt)/5):-1, 1], xt[int(len(xt)/5):-1, 2])
            ax.plot(xt[:, 0], xt[:, 1], xt[:, 2])
            print("--------------- Close window to continue...")
            plt.show()

        """ reset environment and task
        """
        print("reset")
        key_frames.clear()
        rnn_state_waterfall.clear()
        rnn_state = model.initial_state(GE.num_envs)
        # rnn_state = model.initial_state_rnd(GE.num_envs, k1_new)

        # archive the maze just played so it can be replayed later
        save_current_task(landscape, 
                        init_x, init_y, goal_x, goal_y,
                        pth = rpl_config.task_pth)
        print("current task saved : ", rpl_config.task_pth)


        print("creating new landscape...")
        # landscape = generate_maze_pool(num_mazes=1, width=10, height=10, weight=0.5)
        landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
        landscape = padding_landscapes(landscape, width=12, height=12)
        GE.set_landscapes(landscape)
        GE.reset()
        concat_obs = GE.concat_obs
        trajectory.clear()
        start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10
        trajectory.append([start_x, start_y])
        step_count = 0


# script entry point: run the interactive replay loop
if __name__ == "__main__":
    main()