from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *

from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
import matplotlib.pyplot as plt

import json
from replay_config import *
import argparse

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Jitted single forward pass through `model`.

    `model` is marked static, so JAX compiles one specialization per model
    instance. Returns whatever `model.apply` returns — callers unpack it as
    (new recurrent state, output).
    """
    result = model.apply(variables, state, x)
    return result

@jit
def get_action(y):
    """Greedy action selection: index of the largest entry of the output vector y."""
    best_idx = jnp.argmax(y)
    return best_idx

# Vectorized greedy selection over a batch of output vectors (leading axis).
get_action_vmap = jax.vmap(get_action)

# @partial(jax.jit, static_argnums=(1,))
# def get_action(y, action_threshold: float = 0.2):
#     """ get deterministic action from the model output
#     """
#     arg_max = jnp.argmax(y)
#     arg_max = jnp.where(jnp.max(y) >= action_threshold, arg_max, 4)
#     return arg_max

# """ auto batched get_action
# """
# get_action_vmap = jax.vmap(get_action, in_axes=(0, None))

# save current landscape as json file
def save_current_task(landscape, start_x, start_y, goal_x, goal_y, pth = "./data/adaptive_trajectory_optimization/landscape.json"):
    """Save the first landscape in the pool plus start/goal cells as JSON.

    Args:
        landscape: pool of landscapes; only landscape[0] (a 1-D sequence of
            cell values) is serialized.
        start_x, start_y: agent start cell coordinates.
        goal_x, goal_y: goal cell coordinates.
        pth: output JSON file path.
    """
    # Cast every cell to a plain int so numpy/jax scalars are JSON-serializable.
    landscape_ = [int(v) for v in landscape[0]]

    with open(pth, "w") as f:
        json.dump({"data": landscape_,
                   "state": [start_x, start_y],
                   "goal": [goal_x, goal_y]}, f)

def main():

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--show_trj", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.show_trj
    rpl_config.life_duration = args.life_duration

    os.environ['CUDA_VISIBLE_DEVICES']='0'

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    """ create landscape
    """
    landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
    landscape = padding_landscapes(landscape, width=12, height=12)

    print("landscape :")
    print(landscape)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
        # model = RNN_th_rs1(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # # check if param fits the agent
    # if rpl_config.nn_type == "vanilla":
    #     assert params["params"]["Dense_0"]["kernel"].shape[0] == model.hidden_dims + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscape, width = 12, height = 12, num_envs_per_landscape = 1)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))
    concat_obs = GE.concat_obs
    rnn_state = model.initial_state(GE.num_envs)

    # result holders
    trajectory = []
    rnn_state_waterfall = []
    key_frames = []

    task_ab = True

    # count the files in the folder "./data/adaptive_trajectory_optimization/task_envs/"
    # n_files = len(os.listdir("./data/adaptive_trajectory_optimization/task_envs/"))
    n_files = len(os.listdir("./data/adaptive_trajectory_optimization/task_envs_gru/"))
    n_task = n_files

    stationary_threshold = 1.5

    while True:

        total_step_count = 0
        step_count = 0
        render_id = 0
        start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10

        init_x, init_y, goal_x, goal_y = int(GE.batched_states[render_id][0]), int(GE.batched_states[render_id][1]), int(GE.batched_goals[render_id][0]), int(GE.batched_goals[render_id][1])

        step_count_var = 100
        step_count_t_1 = 0

        trial1_len = -1

        batched_actions = jnp.array([0])

        for t in range(rpl_config.life_duration):

            _, k2 = jax.random.split(k1)
            k1 = k2

            step_count += 1

            batched_goal_reached, concat_obs = GE.step(batched_actions)
            rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
            batched_actions = get_action_vmap(y1)

            # update result holders
            rnn_state_waterfall.append(rnn_state[0])
            trajectory.append([20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10])

            # if step_count > 600:
            #     break

            if batched_goal_reached[0] == True:

                if trial1_len<0 :
                    trial1_len = step_count

                total_step_count += step_count

                # compute variance of step_count
                step_count_var = 0.6*step_count_var + 0.4*abs(step_count - step_count_t_1)
                step_count_t_1 = step_count
                print("step_count_var: ", step_count_var)

                # img = GE.render(env_id = render_id)

                # if step_count_var <= stationary_threshold:
                #     # draw a green rectangle around the img
                #     cv2.rectangle(img, (0, 0), (img.shape[1], img.shape[0]), (0, 255, 0), 20)

                # # draw trajectory on img
                # if len(trajectory) > 1:
                #     for i in range(len(trajectory)-1):
                #         cv2.line(img, (int(trajectory[i][1]), int(trajectory[i][0])), (int(trajectory[i+1][1]), int(trajectory[i+1][0])), (0,130,0), 2)
                # cv2.imshow("img", img)
                # k = cv2.waitKey(1)

                trajectory.clear()
                trajectory.append([start_x, start_y])
                print("step_count: ", step_count)

                if total_step_count > rpl_config.life_duration or trial1_len<=20:
                    total_step_count = 0
                    step_count_var = 100
                    step_count_t_1 = 0
                    trial1_len = -1
                    # 2. reset
                    GE.reset()
                    # set states of GE
                    GE.batched_states = GE.batched_states.at[0, 0].set(init_x)
                    GE.batched_states = GE.batched_states.at[0, 1].set(init_y)
                    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
                    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
                    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
                    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
                    concat_obs = GE.concat_obs
                    trajectory.clear()
                    start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10
                    trajectory.append([start_x, start_y])
                    step_count = 0
                    init_x, init_y, goal_x, goal_y = int(GE.batched_states[render_id][0]), int(GE.batched_states[render_id][1]), int(GE.batched_goals[render_id][0]), int(GE.batched_goals[render_id][1])
                    print("reset")

                if step_count_var <= stationary_threshold and trial1_len>0 and step_count<=4:
                    total_step_count = 0
                    step_count_var = 100
                    step_count_t_1 = 0
                    trial1_len = -1
                    # 2. reset
                    GE.reset()
                    # set states of GE
                    GE.batched_states = GE.batched_states.at[0, 0].set(init_x)
                    GE.batched_states = GE.batched_states.at[0, 1].set(init_y)
                    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
                    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
                    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
                    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
                    concat_obs = GE.concat_obs
                    trajectory.clear()
                    start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10
                    trajectory.append([start_x, start_y])
                    step_count = 0
                    init_x, init_y, goal_x, goal_y = int(GE.batched_states[render_id][0]), int(GE.batched_states[render_id][1]), int(GE.batched_goals[render_id][0]), int(GE.batched_goals[render_id][1])
                    print("reset")

                elif step_count_var <= stationary_threshold and trial1_len>0 and step_count>4 and trial1_len/step_count>=3:

                    total_step_count = 0
                    step_count_var = 100
                    step_count_t_1 = 0
                    trial1_len = -1

                    # 1. save the task
                    task_name = "./data/adaptive_trajectory_optimization/task_envs_gru/task_" + str(n_task) + ".json"
                    task_ab = not task_ab
                    save_current_task(landscape, 
                                    init_x, init_y, goal_x, goal_y,
                                    pth = task_name)
                    print("current task saved : ", task_name)

                    n_task += 1
                    break

                elif step_count_var <= stationary_threshold and trial1_len>0 and step_count>4 and trial1_len/step_count<3:
                    total_step_count = 0
                    step_count_var = 100
                    step_count_t_1 = 0
                    trial1_len = -1
                    # 2. reset
                    GE.reset()
                    # set states of GE
                    GE.batched_states = GE.batched_states.at[0, 0].set(init_x)
                    GE.batched_states = GE.batched_states.at[0, 1].set(init_y)
                    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
                    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
                    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
                    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
                    concat_obs = GE.concat_obs
                    trajectory.clear()
                    start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10
                    trajectory.append([start_x, start_y])
                    step_count = 0
                    init_x, init_y, goal_x, goal_y = int(GE.batched_states[render_id][0]), int(GE.batched_states[render_id][1]), int(GE.batched_goals[render_id][0]), int(GE.batched_goals[render_id][1])
                    print("reset")

                # if k == ord('q'):
                #     exit()
                
                step_count = 0

        """ reset environment and task
        """
        print("reset")
        key_frames.clear()
        rnn_state_waterfall.clear()
        rnn_state = model.initial_state(GE.num_envs)

        print("creating new landscape...")
        # landscape = generate_maze_pool(num_mazes=1, width=10, height=10, weight=0.5)
        # landscape = generate_maze_pool(num_mazes=1, width=10, height=10, weight=0.1)
        landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
        landscape = padding_landscapes(landscape, width=12, height=12)
        GE.set_landscapes(landscape)
        GE.reset()
        concat_obs = GE.concat_obs
        trajectory.clear()
        start_x, start_y = 20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10
        trajectory.append([start_x, start_y])
        step_count = 0

        print("----------------------------------------------")
        

# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()