from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import matplotlib.pyplot as plt

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Jitted forward pass through `model`.

    `model` is a static argument (must be hashable); each distinct model
    object triggers its own trace/compilation.  Returns whatever
    `model.apply` returns (here: the new recurrent state and the output).
    """
    result = model.apply(variables, state, x)
    return result

@jit
def get_action(y):
    """Greedy action selection: index of the largest logit in `y`."""
    return y.argmax()

# Batched greedy action selection over the leading (env) axis.
get_action_vmap = jax.vmap(get_action)

def load_task(pth = "./logs/task.json"):
    """Load a saved task from a JSON file.

    The file must contain the keys "data" (flat landscape), "state"
    (start position) and "goal" (goal position).  Echoes the loaded
    values to stdout and returns (landscape, state, goal).
    """
    with open(pth, "r") as f:
        payload = json.load(f)
    landscape = payload["data"]
    state = payload["state"]
    goal = payload["goal"]
    print("state: ", state)
    print("goal: ", goal)
    print("landscape: ", landscape)
    return landscape, state, goal

# save current landscape as json file
def save_current_task(landscape, start_x, start_y, goal_x, goal_y, pth = "./logs/landscape.json"):
    """Serialize the first landscape plus start/goal positions to JSON.

    Only ``landscape[0]`` is saved (a flat 1-D array of cell values); each
    cell is cast to ``int`` so array scalars become JSON-serializable.
    The output format matches what ``load_task`` expects: keys "data",
    "state" ([start_x, start_y]) and "goal" ([goal_x, goal_y]).
    """
    # Comprehension replaces the original index loop; iterating a 1-D
    # array yields the same elements as indexing 0..shape[0]-1.
    landscape_ = [int(cell) for cell in landscape[0]]

    with open(pth, "w") as f:
        json.dump({"data": landscape_,
                   "state": [start_x, start_y],
                   "goal": [goal_x, goal_y]}, f)

def render(grid, state, goal, valid = True):
    """Render the grid world to a BGR uint8 image.

    Draws every cell as a bordered square (white where ``grid == 1``,
    black otherwise), the agent at ``state`` as a red dot, the goal at
    ``goal`` as a dark-green dot, and — when ``valid`` is False — a red
    cross plus an "invalid map" banner over the whole image.

    ``grid`` is indexed [row, col]; ``state`` and ``goal`` are
    (row, col) pairs.  Returns the rendered image.
    """
    state_x = int(state[0])
    state_y = int(state[1])

    food_x = int(goal[0])
    food_y = int(goal[1])

    grid_size_display = 20
    width, height = grid.shape[0], grid.shape[1]
    img = np.zeros((width * grid_size_display, height * grid_size_display, 3), np.uint8)

    for j in range(width):
        for i in range(height):
            top_left = (i * grid_size_display, j * grid_size_display)
            bottom_right = (i * grid_size_display + grid_size_display,
                            j * grid_size_display + grid_size_display)
            # Fill the cell, then draw a gray border on top (the two
            # original branches differed only in the fill color).
            fill = (255, 255, 255) if grid[j, i] == 1 else (0, 0, 0)
            cv2.rectangle(img, top_left, bottom_right, fill, -1)
            cv2.rectangle(img, top_left, bottom_right, (100, 100, 100), 1)
            if j == state_x and i == state_y:
                # Agent as a red dot (BGR) centered in its cell.
                cv2.circle(img, (i * grid_size_display + grid_size_display // 2,
                                 j * grid_size_display + grid_size_display // 2),
                           7, (0, 0, 255), -1, cv2.LINE_AA)

    # Goal as a dark-green dot.  FIX: the original drew this identical
    # circle twice in a row; one draw is sufficient and pixel-identical.
    cv2.circle(img, (food_y * grid_size_display + grid_size_display // 2,
                     food_x * grid_size_display + grid_size_display // 2),
               7, (0, 100, 0), -1, cv2.LINE_AA)

    if not valid:
        # Big red cross plus a two-pass "invalid map" banner (light
        # outline under a darker fill for readability).
        cv2.line(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), 5, cv2.LINE_AA)
        cv2.line(img, (0, img.shape[0]), (img.shape[1], 0), (0, 0, 255), 5, cv2.LINE_AA)
        cv2.putText(img, "invalid map", (int(img.shape[1]/2) - 100, int(img.shape[0]/2)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (200, 200, 200), 3, cv2.LINE_AA)
        cv2.putText(img, "invalid map", (int(img.shape[1]/2) - 100, int(img.shape[0]/2)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (200, 0, 0), 2, cv2.LINE_AA)
    return img

# Most recent mouse event, shared between the OpenCV mouse callback
# (input_cb in run_editor) and the editor loop via module-level globals.
# event_type is one of "", "flip_space", "set_start", "set_goal" or their
# "*_done" variants once the editor loop has consumed the event.
event_type = ""
event_x = 0  # pixel x of the last mouse event
event_y = 0  # pixel y of the last mouse event

def run_editor(landscape, state, goal, map_size=12):
    """Interactive OpenCV maze editor.

    Controls:
      mouse wheel : toggle the cell under the cursor
      left click  : set the agent start position
      right click : set the goal position
      's'         : save the current task to ./logs/test.json
      'r'         : reset the grid back to the input ``landscape``
      'q'         : quit the editor

    ``landscape`` is a flat row-major list/array of length
    map_size*map_size.  Returns the edited landscape as a flat list.
    """
    global event_type, event_x, event_y

    # Mouse callback: just record the event; the loop below consumes it.
    def input_cb(event, x, y, flags, param):
        global event_type, event_x, event_y
        if event == cv2.EVENT_MOUSEWHEEL:
            event_x = x
            event_y = y
            event_type = "flip_space"
        elif event == cv2.EVENT_LBUTTONUP:
            event_x = x
            event_y = y
            event_type = "set_start"
        elif event == cv2.EVENT_RBUTTONUP:
            event_x = x
            event_y = y
            event_type = "set_goal"

    cv2.namedWindow("img", cv2.WINDOW_GUI_NORMAL)
    cv2.setMouseCallback("img", input_cb)

    grid = np.array(landscape).reshape(map_size, map_size).transpose()
    valid = True
    grid_size_display = 20

    while True:

        # Translate the last mouse event into an edit of the grid; the
        # "*_done" sentinel ensures each event is applied exactly once.
        row = event_y // grid_size_display
        col = event_x // grid_size_display
        if event_type == "flip_space":
            grid[row, col] = 1 - grid[row, col]
            event_type = "flip_space_done"
        elif event_type == "set_start":
            state = [row, col]
            event_type = "set_start_done"
        elif event_type == "set_goal":
            goal = [row, col]
            event_type = "set_goal_done"

        num_labels, labels, stats, centroids, num_freespace, landscape_img = check_num_labels(grid, map_size, map_size)
        # BUG FIX: validity must be judged on the *edited* grid.  The
        # original counted non-zeros of the ``landscape`` argument, which
        # never changes while editing, so toggling cells could not affect
        # this part of the validity check.
        non_zeros = np.count_nonzero(grid)
        valid = (num_labels == 2 and non_zeros >= 5)

        img = render(grid, state, goal, valid)
        cv2.imshow("img", img)
        k = cv2.waitKey(1)
        if k == ord('q'):
            break
        elif k == ord('s'):
            pth = "./logs/test.json"
            # Undo the display transpose before flattening back to the
            # row-major layout used by the task files.
            flat = grid.transpose().reshape(map_size * map_size)
            save_current_task([flat], state[0], state[1], goal[0], goal[1], pth)
            print("task saved to {}".format(pth))
        elif k == ord('r'):
            grid = np.array(landscape).reshape(map_size, map_size).transpose()

    grid_ = grid.transpose()
    grid_ = grid_.reshape(map_size * map_size).tolist()
    return grid_


def main() -> None:

    """ Interactive replay of a trained grid-world agent: parse arguments,
    load the model checkpoint, build (or load) a maze task, then step the
    agent with optional visualization, video recording, step-by-step
    manual control and a live map editor.
    """
    rpl_config = ReplayConfig()

    # Every CLI flag defaults to the corresponding ReplayConfig field, so
    # running with no flags replays the default configuration.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Write the parsed values back into the shared config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # Window for the hidden-state / output bar-chart visualization.
    cv2.namedWindow("rnn_state_img", 0)

    # PRNG key used by the random-action perturbation ('p' key) below.
    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    # NOTE(review): leaves 1 and 3 are used below as the recurrent-input
    # weight matrix and the output weight matrix respectively — presumably;
    # confirm against the printed leaf shapes and the model definition.
    leaf_1 = jnp.copy(tree_leaves[1])
    leaf_3 = jnp.copy(tree_leaves[3])

    # # convert leaf_1 to np array
    leaf_1_np = np.array(leaf_1[0:128,:])
    print("shape of leaf_1_np: ", leaf_1_np.shape)
    # leaf_1_np = leaf_1_np + np.min(leaf_1_np)
    # leaf_1_np = leaf_1_np / np.max(leaf_1_np)
    # leaf_1_np = leaf_1_np * 255
    # leaf_1_np = leaf_1_np.astype(np.uint8)
    # leaf_1_np = cv2.applyColorMap(leaf_1_np, cv2.COLORMAP_HOT)
    # cv2.imshow("leaf_1", leaf_1_np)
    # cv2.waitKey(0)

    # NOTE(review): row 132 of leaf_1 is taken as the weight row of a single
    # observation channel (the reward signal?) — TODO confirm this index
    # against the observation layout.
    reward_signature = np.abs(np.array(leaf_1[132,:]))

    print("shape of reward_signature: ", reward_signature.shape)

    """ create landscape
    """
    # Use a freshly generated random maze unless a task file already exists.
    random_task = True
    # check if file on rpl_config.task_pth exists
    if os.path.isfile(rpl_config.task_pth):
        random_task = False

    if random_task:
        landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
        landscape = padding_landscapes(landscape, width=12, height=12)
    else:
        landscape, state, goal = load_task(pth = rpl_config.task_pth)
        landscape = [landscape]

    print("landscape :")
    print(landscape)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    if rpl_config.nn_type == "vanilla":
        # Dense_0 input width must equal hidden state (nn_size) plus the
        # 10-dimensional observation vector.
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscape, width = 12, height = 12, num_envs_per_landscape = 1)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    if not random_task:
        # Overwrite the randomly initialized start/goal with the loaded
        # task, then recompute every derived field so the env is consistent.
        # set states of GE
        GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
        GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
        # set goals of GE
        GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
        GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)

    concat_obs = GE.concat_obs

    rnn_state = model.initial_state(GE.num_envs)

    # pth = "./logs/state0.json"
    # with open(pth, "r") as f:
    #     data = json.load(f)
    #     rnn_state_json = data["data"]
    # rnn_state = jnp.array([rnn_state_json])

    step_count = 0
    render_id = 0  # index of the env that is rendered/recorded

    # # create a canvas to hold rnn_state visualization
    # canvas = np.zeros((40, (rpl_config.nn_size+4)*10, 3), dtype=np.uint8)

    """ create video writer
    """
    if rpl_config.video_output == "True":
        img = GE.render(env_id = render_id)
        # record a video using opencv
        fourcc = cv2.VideoWriter_fourcc('F','M','P','4')
        out = cv2.VideoWriter(rpl_config.log_pth + 'output.mp4',fourcc, 10.0, (img.shape[1], img.shape[0]))

    trajectory = []
    rnn_state_waterfall = []

    # Layout constants for the hidden-state visualization canvas.
    neuron_interval = 10
    canvas_width = (rnn_state[0].shape[0]+4)*neuron_interval
    canvas_height = 600
    output_horizontal = int(canvas_width/2)
    vanilla_vertical = 200
    output_vertical = int(canvas_height * 0.9)
    contribution_height = 400
    rnn_state_img0 = np.zeros((canvas_height, canvas_width, 3), dtype=np.uint8)
    # # visualize leaf_3
    # max_w = jnp.max(jnp.abs(leaf_3))
    # for i in range(leaf_3.shape[0]):
    #     for j in range(leaf_3.shape[1]):
    #         w_ = leaf_3[i][j]
    #         color = int((abs(w_)/max_w)*255)
    #         cv2.line(rnn_state_img0, (i*neuron_interval, vanilla_vertical), (j*neuron_interval + output_horizontal, output_vertical), (color, color, color), 1)

    Energy_trace = []

    # Auto-reset flag forwarded to GE.step each iteration.
    reset_ = True

    # When True, actions come from the keyboard (w/a/s/d) instead of the policy.
    step_by_step = False
    manual_action = 0

    while True:

        step_count += 1

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)

        if not step_by_step:
            batched_actions = get_action_vmap(y1)
        else:
            batched_actions = jnp.array([manual_action])

        batched_goal_reached, concat_obs = GE.step(batched_actions, reset = reset_)

        # Keep the full hidden-state history for the variance display below.
        rnn_state_waterfall.append(rnn_state[0].tolist())

        # compute the variance of rnn_state_waterfall in a window of 20 steps
        rnn_state_window = rnn_state_waterfall[-20:]
        rnn_state_window_var = np.var(rnn_state_window, axis=0)

        # print("rnn_state_window_var: ", rnn_state_window_var)
        # print("")

        """ compute the Energy of the vanilla network
        """
        if rpl_config.nn_type == "vanilla":
            # Hopfield-style quadratic energy of the hidden state, using the
            # square recurrent part of leaf_1 (first hidden-dim rows).
            E = -0.5 * rnn_state[0] @ leaf_1[0:rnn_state[0].shape[0]] @ rnn_state[0]
            print("E: ", E)
            Energy_trace.append(E)

        """ render the env
        """
        if rpl_config.visualization == "True" or rpl_config.video_output == "True":
            img = GE.render(env_id = render_id)
            # Overlay the agent's past trajectory as green line segments.
            if len(trajectory) > 1:
                for i in range(len(trajectory)-1):
                    cv2.line(img, (int(trajectory[i][1]), int(trajectory[i][0])), (int(trajectory[i+1][1]), int(trajectory[i+1][0])), (0,130,0), 2)

            """ visualize concat_obs, rnn_state, y1
            """
            rnn_state_img = rnn_state_img0.copy()
            # visualize rnn_state on rnn_state_img as bar chart
            rnn_state0 = rnn_state[0]
            for i in range(rnn_state0.shape[0]):
                # Green channel of each bar encodes the neuron's windowed variance.
                y_color = rnn_state_window_var[i]*1000
                cv2.line(rnn_state_img, (i*neuron_interval, int(vanilla_vertical/2)), (i*neuron_interval, int(vanilla_vertical/2) + int(rnn_state0[i]*90)), (0,y_color,255), 4)

            # visualize y1 on rnn_state_img as bar chart
            y10 = y1[0]
            for i in range(y10.shape[0]):
                cv2.line(rnn_state_img, (i*neuron_interval + output_horizontal, output_vertical), (i*neuron_interval + output_horizontal, output_vertical + int(y10[i]*30)), (0,255,0), 4)

            # leaf_3_mean = jnp.mean(leaf_3, axis=1)
            # rnn_state0_contrib = jnp.multiply(leaf_3_mean, rnn_state0)
            # for i in range(rnn_state0_contrib.shape[0]):
            #     cv2.line(rnn_state_img, (i*neuron_interval, contribution_height), (i*neuron_interval, contribution_height + int(rnn_state0_contrib[i]*90)), (0,200,0), 4)

            # Bar chart of |reward weight| per hidden neuron (see
            # reward_signature above).
            for i in range(reward_signature.shape[0]):
                cv2.line(rnn_state_img, (i*neuron_interval, contribution_height), (i*neuron_interval, contribution_height + int(reward_signature[i]*90)), (0,200,0), 4)


        # Record the agent position in pixel coordinates; assumes GE.render
        # draws 20-px cells (+10 centers the dot) — TODO confirm in GridEnv.
        trajectory.append([20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10])

        if rpl_config.video_output == "True":
            # wirte the rendered image to the video
            out.write(img)

        if batched_goal_reached[render_id]:
            trajectory.clear()

        """ scene display
        """
        if rpl_config.visualization == "True":

            cv2.imshow("img", img)
            cv2.imshow("rnn_state_img", rnn_state_img)

            # In step-by-step mode, block until a key is pressed.
            if step_by_step:
                k = cv2.waitKey(0)
            else:
                k = cv2.waitKey(1)
            # 'r': reset env + agent state (restoring the loaded task, if any).
            if k == ord('r'):
                rnn_state_waterfall.clear()
                rnn_state = model.initial_state(GE.num_envs)
                GE.rnd_goal_collection = get_rnd_goal_collection_vmap(GE.env_keys, GE.batched_envs, GE.width, GE.height, GE.num_free_spaces)
                GE.reset()
                if not random_task:
                    # Same task-restoration sequence as at startup.
                    # set states of GE
                    GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
                    GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
                    # set goals of GE
                    GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
                    GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
                    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
                    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
                    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
                    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
                    concat_obs = GE.concat_obs

                trajectory.clear()
            # 'p': perturb the rollout with one random env step.
            elif k == ord('p'):
                # 1. take a random action
                k2, _ = jax.random.split(k1)
                k1 = k2
                random_action = jax.random.randint(k1, shape=(GE.num_envs, ), minval=0, maxval=4)
                batched_goal_reached, concat_obs = GE.step(random_action)
                print("random action: ", random_action)

                # # 2. add a perturbation to the state
                # k2, _ = jax.random.split(k1)
                # k1 = k2
                # random_perturbation = jax.random.normal(k1, (1, rpl_config.nn_size))
                # rnn_state = rnn_state.at[0].set(rnn_state[0] + random_perturbation[0])

                # # 3. zero-out the states whose variances are low
                # print("rnn_state before zeroing: ", rnn_state)
                # rnn_state = rnn_state.at[0].set(jnp.where(rnn_state_window_var<=0.01, 0, rnn_state[0]))
                # print("rnn_state after zeroing: ", rnn_state)

                # # 4. zero-out the states whose variances are high
                # print("rnn_state before zeroing: ", rnn_state)
                # rnn_state = rnn_state.at[0].set(jnp.where(rnn_state_window_var>=0.02, 0, rnn_state[0]))
                # print("rnn_state after zeroing: ", rnn_state)
            # elif k == ord('s'):
            #     reset_ = not reset_
            # 'n': sample a brand-new random maze (random-task mode only).
            elif k == ord('n'):
                if random_task:
                    rnn_state = model.initial_state(GE.num_envs)
                    landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
                    landscape = padding_landscapes(landscape, width=12, height=12)
                    GE.set_landscapes(landscape)
                    GE.reset()
                    trajectory.clear()
                    rnn_state_waterfall.clear()
            # 'q': finish (closing the video file if one is open).
            elif k == ord('q'):
                if rpl_config.video_output == "True":
                    # save video
                    out.release()
                break
            # 't': toggle manual step-by-step control.
            elif k == ord('t'):
                step_by_step = not step_by_step
            # w/a/s/d: choose the action used while in step-by-step mode.
            elif k == ord('w'):
                manual_action = 3
            elif k == ord('s'):
                manual_action = 2
            elif k == ord('a'):
                manual_action = 1
            elif k == ord('d'):
                manual_action = 0
            # 'e': open the interactive map editor on the current landscape.
            elif k == ord('e'):
                new_landscape = run_editor(landscape[0], GE.batched_states[0], GE.batched_goals[0])
                GE.set_landscapes_only([new_landscape])
                landscape[0] = new_landscape

    # # draw Energy_trace as a line chart
    # plt.figure()
    # plt.plot(Energy_trace)
    # # plot the windowed mean of Energy_trace
    # window_size = 100
    # Energy_trace_window_mean = np.convolve(Energy_trace, np.ones((window_size,))/window_size, mode='valid')
    # # Energy_trace_window_mean with different color
    # plt.plot(Energy_trace_window_mean, color='red')
    # plt.xlabel("time step")
    # plt.ylabel("Energy")
    # plt.title("Energy trace window mean")
    # plt.show()

if __name__ == "__main__":
    main()