from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import matplotlib.pyplot as plt

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Jitted forward pass of the agent network.

    `model` is marked static so JAX retraces only when a different model
    object is supplied; `variables`, `state` and `x` are traced normally.
    Returns whatever `model.apply` returns (new state and outputs).
    """
    result = model.apply(variables, state, x)
    return result

def get_action(y):
    """Greedy policy: pick the index of the largest output logit."""
    return jnp.argmax(y)

# jit-compile the single-sample version, then vectorize it over a batch axis
get_action = jit(get_action)
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json"):
    """Read a saved task (maze layout, start state, goal) from a JSON file.

    Returns:
        (landscape, state, goal) exactly as stored under the "data",
        "state" and "goal" keys of the file.
    """
    with open(pth, "r") as f:
        data = json.load(f)
    landscape = data["data"]
    state = data["state"]
    goal = data["goal"]
    print("state: ", state)
    print("goal: ", goal)
    print("landscape: ", landscape)
    return landscape, state, goal

def render(grid, state, goal, valid=True):
    """Draw the maze grid as a BGR uint8 image.

    Walls (grid == 1) are white cells, free space black, each with a grey
    border.  The agent position `state` is drawn as a red dot and the
    food/goal as a dark green dot.  When `valid` is False, a red cross and
    an "invalid map" label are overlaid.

    Args:
        grid: 2D array of cell values (1 = wall, otherwise free).
        state: (row, col) agent position; values are cast to int.
        goal: (row, col) food position; values are cast to int.
        valid: whether the current map passed the validity check.

    Returns:
        Image of shape (grid.shape[0]*20, grid.shape[1]*20, 3).
    """
    cell = 20  # pixels per grid cell

    state_x, state_y = int(state[0]), int(state[1])
    food_x, food_y = int(goal[0]), int(goal[1])

    width, height = grid.shape[0], grid.shape[1]
    img = np.zeros((width * cell, height * cell, 3), np.uint8)

    for j in range(width):
        for i in range(height):
            top_left = (i * cell, j * cell)
            bottom_right = (i * cell + cell, j * cell + cell)
            fill = (255, 255, 255) if grid[j, i] == 1 else (0, 0, 0)
            cv2.rectangle(img, top_left, bottom_right, fill, -1)
            # grey border around every cell
            cv2.rectangle(img, top_left, bottom_right, (100, 100, 100), 1)

    # agent position (red dot); drawn only when the state lies on the grid,
    # matching the original in-loop bounds behavior
    if 0 <= state_x < width and 0 <= state_y < height:
        cv2.circle(img, (state_y * cell + cell // 2, state_x * cell + cell // 2), 7, (0, 0, 255), -1, cv2.LINE_AA)

    # food/goal position (dark green dot) — was drawn twice before; once suffices
    cv2.circle(img, (food_y * cell + cell // 2, food_x * cell + cell // 2), 7, (0, 100, 0), -1, cv2.LINE_AA)

    if not valid:
        # big red cross plus an outlined "invalid map" label
        cv2.line(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), 5, cv2.LINE_AA)
        cv2.line(img, (0, img.shape[0]), (img.shape[1], 0), (0, 0, 255), 5, cv2.LINE_AA)
        cv2.putText(img, "invalid map", (int(img.shape[1]/2) - 100, int(img.shape[0]/2)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (200, 200, 200), 3, cv2.LINE_AA)
        cv2.putText(img, "invalid map", (int(img.shape[1]/2) - 100, int(img.shape[0]/2)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (200, 0, 0), 2, cv2.LINE_AA)
    return img

# Shared state between the OpenCV mouse callback and the editor main loop:
# the callback records the latest event here; run_editor polls and consumes it.
event_type = ""
event_x = 0
event_y = 0

def run_editor(landscape, state, goal, map_size=12):
    """Interactive OpenCV maze editor.

    Mouse: middle-click toggles a wall/free cell; left- and right-click are
    recorded as "set_start" / "set_goal" events (not yet consumed by the
    loop).  Keys: 'q' quits and returns the edited map, 'r' reverts to the
    original landscape.

    Args:
        landscape: flat list of map_size*map_size cell values (column-major
            relative to the displayed grid; it is transposed for editing).
        state: agent position, passed through to render().
        goal: food position, passed through to render().
        map_size: side length of the square maze.

    Returns:
        The edited map as a flat list in the same layout as `landscape`.
    """
    global event_type, event_x, event_y

    def input_cb(event, x, y, flags, param):
        # Record the most recent mouse event; the loop below consumes it.
        global event_type, event_x, event_y
        if event == cv2.EVENT_MBUTTONUP:
            event_x, event_y = x, y
            event_type = "flip_space"
        elif event == cv2.EVENT_LBUTTONUP:
            event_x, event_y = x, y
            event_type = "set_start"
        elif event == cv2.EVENT_RBUTTONUP:
            event_x, event_y = x, y
            event_type = "set_goal"

    cv2.namedWindow("img", cv2.WINDOW_GUI_NORMAL)
    cv2.setMouseCallback("img", input_cb)

    grid = np.array(landscape).reshape(map_size, map_size).transpose()
    grid_size_display = 20  # must match the cell size used by render()

    while True:

        if event_type == "flip_space":
            row = event_y // grid_size_display
            col = event_x // grid_size_display
            grid[row, col] = 1 - grid[row, col]
            event_type = "flip_space_done"

        num_labels, labels, stats, centroids, num_freespace, landscape_img = check_num_labels(grid, map_size, map_size)
        # BUG FIX: validity must be judged on the *edited* grid, not the
        # original landscape — otherwise edits never affect the free-cell
        # count. A map is valid when the free space forms one connected
        # component (background + one label) and has at least 5 free cells.
        non_zeros = np.count_nonzero(grid)
        valid = (num_labels == 2) and (non_zeros >= 5)

        img = render(grid, state, goal, valid)
        cv2.imshow("img", img)
        k = cv2.waitKey(1)
        if k == ord('q'):
            break
        elif k == ord('r'):
            # revert to the initial landscape
            grid = np.array(landscape).reshape(map_size, map_size).transpose()

    # back to the flat, caller-side layout
    return grid.transpose().reshape(map_size * map_size).tolist()


def main():

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    cv2.namedWindow("rnn_state_img", 0)
    # cv2.namedWindow("img", cv2.WINDOW_GUI_NORMAL)

    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # # 定义一个函数，用于生成随机权重
    # def init_weights_r(key, shape):
    #     return jax.random.normal(key, shape)
    # # 生成一个随机的 PRNGKey
    # key = jax.random.PRNGKey(np.random.randint(0, 1000))
    # # key = jax.random.PRNGKey(4512)
    # # 使用 tree_map 遍历 params 对象，并使用 init_weights 函数生成随机权重
    # random_params = jax.tree_map(lambda x: init_weights_r(key, x.shape), params)
    # params = random_params

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)
    
    leaf_1 = jnp.copy(tree_leaves[1])
    leaf_3 = jnp.copy(tree_leaves[3])

    """ create landscape
    """
    random_task = True
    # check if file on rpl_config.task_pth exists
    if os.path.isfile(rpl_config.task_pth):
        random_task = False

    if random_task:
        landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
        landscape = padding_landscapes(landscape, width=12, height=12)
    else:
        landscape, state, goal = load_task(pth = rpl_config.task_pth)
        landscape = [landscape]

    print("landscape :")
    print(landscape)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    # blank_space = [1 for _ in range(12*12)]
    # # make borders
    # for i in range(12):
    #     blank_space[i] = 0
    #     blank_space[11*12 + i] = 0
    #     blank_space[i*12] = 0
    #     blank_space[i*12 + 11] = 0
    # landscape[0] = blank_space

    start_time = time.time()
    GE = GridEnv(landscapes = landscape, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    if not random_task:
        # set states of GE
        GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
        GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
        # set goals of GE
        GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
        GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)

    concat_obs = GE.concat_obs

    rnn_state = model.initial_state(GE.num_envs)
    step_count = 0
    render_id = 0

    """ create video writer
    """
    if rpl_config.video_output == "True":
        img = GE.render(env_id = render_id)
        # record a video using opencv
        # fourcc = cv2.VideoWriter_fourcc('F','M','P','4')
        fourcc = cv2.VideoWriter_fourcc('f','m','p','4')
        # fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(rpl_config.log_pth + 'output.mp4',fourcc, 10.0, (img.shape[1], img.shape[0]))

    trajectory = []
    rnn_state_waterfall = []

    """ rnn state visualization
    """
    neuron_interval = 10
    neuron_interval1 = 6
    canvas_width = 400 + (rnn_state[0].shape[0]+4)*neuron_interval
    canvas_height = 900
    output_horizontal = int(canvas_width/2)
    vanilla_vertical = 200
    output_vertical = int(canvas_height * 0.9)
    contribution_height = 400
    # create a canvas to hold rnn_state visualization
    rnn_state_img0 = np.zeros((canvas_height, canvas_width, 3), dtype=np.uint8)

    step_by_step = False
    manual_action = 0

    # var_clt_mean_less_than_0_1 = [1, 5, 6, 11, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 27, 28, 34, 44, 46, 57, 62, 65, 68, 80, 82, 83, 88, 89, 96, 98, 100, 102, 104, 108, 113, 116, 120, 122]
    # rnn_feature_0 =  [0.9999582767486572, -0.9831897020339966, 0.9996467232704163, -0.9994123578071594, -0.999851405620575, -0.9999927878379822, 0.999983549118042, 0.9612636566162109, 0.9998806715011597, 0.9932894706726074, -0.9997431635856628, 0.9999991059303284, 0.9999998807907104, -0.9999634027481079, 0.9999784231185913, 0.9996755123138428, -0.9992828369140625, 0.8952787518501282, -0.9999989867210388, 0.9748570919036865, 0.9935382008552551, -0.9999995827674866, 0.9871329665184021, 0.9740328788757324, -0.9953449368476868, 0.9701042771339417, -0.9991737604141235, -0.9999988675117493, -0.9990499019622803, -0.999997079372406, 0.9949736595153809, 0.9822999238967896, -0.9999629855155945, -0.9999940991401672, -0.9986229538917542, 0.9996981024742126, 0.9999974370002747, 0.9871447682380676, -0.9999998807907104]
    # rnn_feature_1 =  [-0.9998412728309631, 0.9671010375022888, -0.9999982714653015, 0.9992090463638306, 0.9994693994522095, 0.9999602437019348, -0.9925903081893921, -0.9984927773475647, -0.9941282868385315, 0.9965940713882446, 0.9773151278495789, -0.9977096915245056, -0.9996675252914429, 0.9999555349349976, -0.9999988675117493, -0.9978929758071899, 0.9999995231628418, -0.9997955560684204, 0.9996235966682434, 0.9973439574241638, -0.9999579787254333, 0.9999728798866272, 0.9823980927467346, -0.9997960329055786, 0.9941484332084656, -0.9999427795410156, 0.9995871186256409, 0.9999239444732666, 0.9942917823791504, 0.9999849796295166, -0.9987909197807312, -0.9999961256980896, 0.9863384366035461, 0.9999999403953552, 0.9593451619148254, -0.9977843165397644, -0.9793924689292908, -0.9543303847312927, 0.9943166375160217]
    # rnn_feature_2 =  [0.9999920725822449, -0.9973777532577515, 0.9999567866325378, -0.9910239577293396, -0.9999980926513672, -0.9999908804893494, 0.9994378089904785, 0.9657203555107117, 0.9999774098396301, -0.9924333691596985, -0.9999697804450989, 0.9999998807907104, 0.9999997615814209, -0.9969027638435364, 0.9999698996543884, 0.9999940991401672, -0.999999463558197, 0.9991713166236877, -0.9999998807907104, -0.9957963228225708, 0.9988913536071777, -0.9999998807907104, -0.9945865869522095, 0.9999521970748901, -0.9996950626373291, 0.9985164403915405, -0.998382568359375, -0.9999978542327881, -0.9999244809150696, -0.9996201395988464, 0.999308168888092, 0.9999228715896606, -0.9999930262565613, -0.9999990463256836, -0.9861818552017212, 0.9988118410110474, 0.9999998807907104, 0.9948789477348328, -0.9999997615814209]
    # rnn_feature_3 =  [-0.9705339074134827, 0.9958152174949646, -0.9995390772819519, 0.9999624490737915, 0.999870240688324, 0.9999515414237976, -0.9939287304878235, -0.9997881054878235, -0.9936801791191101, -0.977760910987854, 0.9972586035728455, -0.9999999403953552, -0.9999999403953552, 0.9999993443489075, -0.9999997019767761, -0.9981520771980286, 0.994085431098938, -0.9939032793045044, 0.9999999403953552, -0.991743266582489, -0.9992260932922363, 0.9999999403953552, -0.9556638598442078, -0.9771517515182495, 0.9983199238777161, -0.9999507069587708, 0.9929742217063904, 0.9999997615814209, 0.9999983906745911, 0.9997593760490417, -0.9996225833892822, -0.9998718500137329, 0.9999980330467224, 0.9999902844429016, 0.9994619488716125, -0.9978642463684082, -0.9999993443489075, -0.9995915293693542, 0.9999999403953552]

    var_clt_mean_less_than_0_1 = [0, 1, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 39, 41, 43, 44, 46, 49, 52, 54, 57, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 71, 74, 77, 79, 80, 82, 83, 84, 85, 86, 88, 89, 90, 91, 92, 96, 97, 98, 99, 100, 101, 102, 103, 104, 106, 107, 108, 109, 110, 113, 115, 116, 118, 120, 121, 122, 123, 124, 125, 126]
    rnn_feature_0 =  [-0.9461527466773987, 0.9999582767486572, -0.975846529006958, -0.9906558990478516, -0.9831897020339966, 0.9996467232704163, 0.9916031956672668, 0.9968320727348328, -0.9124768972396851, -0.9994123578071594, -0.8605355620384216, -0.999851405620575, -0.9999927878379822, 0.999983549118042, 0.9612636566162109, 0.9998806715011597, 0.9932894706726074, -0.9997431635856628, -0.9999997019767761, 0.9999991059303284, 0.9999998807907104, -0.9999634027481079, 0.9999784231185913, 0.9996755123138428, -0.9992828369140625, 0.8932662010192871, -0.9987356662750244, 0.9904150366783142, -0.999648928642273, 0.9468201994895935, 0.8952787518501282, -0.9989546537399292, -0.9982856512069702, 0.9999709129333496, -0.9981957077980042, -0.9974076151847839, -0.9678183197975159, -0.9999989867210388, 0.9748570919036865, -0.9999744892120361, 0.9587243795394897, 0.9830151200294495, 0.9935382008552551, -0.9996662139892578, -0.999999463558197, -0.9789413213729858, -0.9999995827674866, -0.8860974311828613, 0.9345378875732422, 0.9871329665184021, 0.8611211180686951, -0.9988968968391418, 0.9740328788757324, 0.995445966720581, 0.9981663227081299, -0.9995632767677307, 0.9999988675117493, -0.8693735599517822, -0.9953449368476868, 0.9701042771339417, -0.9991737604141235, -0.9907732009887695, -0.990005612373352, 0.9999998807907104, -0.9999988675117493, -0.9990499019622803, -0.946376621723175, 0.9362999200820923, -0.9999972581863403, -0.999997079372406, 0.9736419916152954, 0.9949736595153809, -0.964794933795929, 0.9822999238967896, -0.970579206943512, -0.9999629855155945, -0.999987781047821, -0.9999940991401672, -0.9361556768417358, -0.9990412592887878, -0.9986229538917542, -0.9984415769577026, 0.9999998211860657, 0.9996981024742126, -0.9994224309921265, 0.9999974370002747, 0.9792995452880859, 0.9871447682380676, 0.9999986886978149, -0.9999998807907104, 0.9999998807907104, 0.9490108489990234, 0.9829628467559814, -0.9778906106948853]
    rnn_feature_1 =  [-0.9283203482627869, -0.9998435378074646, -0.9638441205024719, 0.9988398551940918, 0.967153012752533, -0.9999982714653015, -0.9427096843719482, -0.9438421130180359, 0.9670294523239136, 0.999212384223938, -0.9875056743621826, 0.9994784593582153, 0.9999609589576721, -0.9926629662513733, -0.998508095741272, -0.9942294359207153, 0.9966515898704529, 0.9777061939239502, 0.9663264751434326, -0.9977492094039917, -0.9996732473373413, 0.9999483227729797, -0.9999988675117493, -0.9979292154312134, 0.9999995231628418, -0.9752395749092102, 0.922939658164978, 0.9681260585784912, -0.9128158092498779, -0.9994750022888184, -0.9997990131378174, 0.917654275894165, 0.9677172303199768, -0.9653894305229187, -0.8428236842155457, 0.9987689256668091, -0.9331029057502747, 0.9996300935745239, 0.9973886609077454, 0.9643940925598145, 0.9969982504844666, -0.9655174612998962, -0.9999585151672363, 0.965191125869751, 0.9669886231422424, 0.9599414467811584, 0.9999733567237854, 0.9693759679794312, 0.9535602331161499, 0.9827015399932861, 0.9561278820037842, 0.9657979011535645, -0.9997995495796204, -0.9655174016952515, -0.9558514356613159, 0.9520189762115479, -0.9576465487480164, -0.9739927053451538, 0.9942493438720703, -0.9999437928199768, 0.9995939135551453, 0.9995091557502747, 0.9656971096992493, -0.9655343890190125, 0.9999252557754517, 0.9943885207176208, 0.9999865889549255, -0.9767569899559021, 0.9655064940452576, 0.9999851584434509, 0.9700678586959839, -0.9988107085227966, -0.9514037370681763, -0.9999961853027344, 0.958463728427887, 0.9865739941596985, 0.8932945728302002, 0.9999999403953552, 0.9678045511245728, 0.9656780362129211, 0.9585627317428589, 0.9654185771942139, -0.9651579260826111, -0.9978225231170654, 0.965463399887085, -0.9797477722167969, -0.9656004309654236, -0.9550648331642151, -0.9615163207054138, 0.9944142699241638, -0.9655593633651733, -0.9863012433052063, 0.8636111617088318, 0.9887285828590393]
    rnn_feature_2 =  [0.9958351254463196, 0.9999920725822449, 0.9943109154701233, -0.9985464215278625, -0.9973777532577515, 0.9999567866325378, 0.9958251118659973, 0.9876009225845337, -0.9994816184043884, -0.9910239577293396, 0.9949482679367065, -0.9999980926513672, -0.9999908804893494, 0.9994378089904785, 0.9657203555107117, 0.9999774098396301, -0.9924333691596985, -0.9999697804450989, -0.9962242245674133, 0.9999998807907104, 0.9999997615814209, -0.9969027638435364, 0.9999698996543884, 0.9999940991401672, -0.999999463558197, 0.9914127588272095, -0.9996015429496765, -0.9995436072349548, 0.97539883852005, 0.9999948143959045, 0.9991713166236877, -0.9886994957923889, -0.9960036873817444, 0.9949798583984375, 0.9921016693115234, -0.9937861561775208, 0.9988359212875366, -0.9999998807907104, -0.9957963228225708, -0.9823622107505798, -0.9970131516456604, 0.9999969005584717, 0.9988913536071777, -0.9997882843017578, -0.9999831318855286, -0.9045405983924866, -0.9999998807907104, -0.9996019005775452, -0.9999549984931946, -0.9945865869522095, -0.9996899962425232, -0.9977795481681824, 0.9999521970748901, 0.9999998807907104, 0.9997276067733765, -0.692555844783783, 0.995883047580719, 0.9771542549133301, -0.9996950626373291, 0.9985164403915405, -0.998382568359375, -0.9999997615814209, -0.9999960064888, 0.9999998807907104, -0.9999978542327881, -0.9999244809150696, -0.9996898770332336, 0.9799578189849854, -0.999114990234375, -0.9996201395988464, -0.9984035491943359, 0.999308168888092, 0.9999555349349976, 0.9999228715896606, -0.9461102485656738, -0.9999930262565613, -0.9784282445907593, -0.9999990463256836, -0.9999871253967285, -0.9881334900856018, -0.9861818552017212, -0.9999997019767761, 0.9999986886978149, 0.9988118410110474, -0.9999892711639404, 0.9999998807907104, 0.9997080564498901, 0.9948789477348328, 0.9999954700469971, -0.9999997615814209, 0.9999998807907104, 0.9999998807907104, -0.917002260684967, -0.9996073842048645]
    rnn_feature_3 =  [0.929304838180542, -0.9705339074134827, 0.9548043012619019, 0.9257931709289551, 0.9958152174949646, -0.9995390772819519, -0.9857621788978577, -0.9858974814414978, 0.9543991088867188, 0.9999624490737915, 0.990217387676239, 0.999870240688324, 0.9999515414237976, -0.9939287304878235, -0.9997881054878235, -0.9936801791191101, -0.977760910987854, 0.9972586035728455, 1.0, -0.9999999403953552, -0.9999999403953552, 0.9999993443489075, -0.9999997019767761, -0.9981520771980286, 0.994085431098938, -0.9613664746284485, 0.9221969842910767, -0.9750844836235046, 0.9994914531707764, -0.9958109855651855, -0.9939032793045044, 0.9942757487297058, 0.9563018083572388, -0.9999991059303284, 0.995768666267395, 0.9596813917160034, 0.999951958656311, 0.9999999403953552, -0.991743266582489, 0.9800897836685181, -0.8371233940124512, -0.9999257922172546, -0.9992260932922363, 0.9999868273735046, 0.9999557137489319, 0.9107763171195984, 0.9999999403953552, 0.997278094291687, -0.9970739483833313, -0.9556638598442078, -0.9633026123046875, 0.9999330639839172, -0.9771517515182495, -0.9751230478286743, -0.9946858286857605, 0.9999380707740784, -0.9999024271965027, 0.8732730746269226, 0.9983199238777161, -0.9999507069587708, 0.9929742217063904, 0.8123788833618164, 0.9945662617683411, -0.9999999403953552, 0.9999997615814209, 0.9999983906745911, 0.9869656562805176, -0.9268302917480469, 0.9999990463256836, 0.9997593760490417, -0.98912513256073, -0.9996225833892822, 0.9998481273651123, -0.9998718500137329, 0.9972803592681885, 0.9999980330467224, 0.9946923851966858, 0.9999902844429016, 0.9754811525344849, 0.9999573826789856, 0.9994619488716125, 0.9924792051315308, -0.9999999403953552, -0.9978642463684082, 0.9987314343452454, -0.9999993443489075, -0.9999983906745911, -0.9995915293693542, -0.9999723434448242, 0.9999999403953552, -0.9999970197677612, -0.9448661804199219, -0.8927628397941589, 0.90985506772995]
    
    quadrant_beleived_old = -1
    orientation_switch_position = (20 * int(GE.batched_states[0][1])+10, 20 * int(GE.batched_states[0][0])+10)

    quadrant_trj = []
    trial_id = 0

    blind_obs = False

    trajectories = []

    for l in range(rpl_config.life_duration):

        step_count += 1

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        if not step_by_step:
            batched_actions = get_action_vmap(y1)
        else:
            batched_actions = jnp.array([manual_action])
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

        trajectories.append(np.array(GE.batched_states[0]))

        # print(concat_obs)

        if blind_obs:
            concat_obs = jnp.where(concat_obs == 1, 0, concat_obs)

        rnn_state_waterfall.append(rnn_state[0].tolist())

        # compute the variance of rnn_state_waterfall in a window of 20 steps
        rnn_state_window = rnn_state_waterfall[-20:]
        rnn_state_window_var = np.var(rnn_state_window, axis=0)

        # print("rnn_state_window_var: ", rnn_state_window_var)
        # print("")

        static_feature = rnn_state[0, var_clt_mean_less_than_0_1]
        sim_to_feature0 = np.dot(static_feature, rnn_feature_0)
        sim_to_feature1 = np.dot(static_feature, rnn_feature_1)
        sim_to_feature2 = np.dot(static_feature, rnn_feature_2)
        sim_to_feature3 = np.dot(static_feature, rnn_feature_3)
        # print("sim_to_features: ", sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3)

        quadrant_beleived = np.argmax([sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3])
        max_sim = np.max([sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3])
        sim_th = -920

        quadrant_trj.append((20 * int(GE.batched_states[0][1])+10, 20 * int(GE.batched_states[0][0])+10))

        if (quadrant_beleived_old != quadrant_beleived or batched_goal_reached[render_id]):
            # analyse quadrant_trj
            inside_n = 0
            outside_n = 0
            if quadrant_beleived_old == 0:
                # count how many points in the right bottom quadrant
                for point in quadrant_trj:
                    if point[0] >= orientation_switch_position[0]-10 and point[1] >= orientation_switch_position[1]-10:
                        inside_n += 1
                    else:
                        outside_n += 1
            elif quadrant_beleived_old == 1:
                # count how many points in the left bottom quadrant
                for point in quadrant_trj:
                    if point[0] <= orientation_switch_position[0]+10 and point[1] >= orientation_switch_position[1]-10:
                        inside_n += 1
                    else:
                        outside_n += 1
            elif quadrant_beleived_old == 2:
                # count how many points in the right top quadrant
                for point in quadrant_trj:
                    if point[0] >= orientation_switch_position[0]-10 and point[1] <= orientation_switch_position[1]+10:
                        inside_n += 1
                    else:
                        outside_n += 1
            elif quadrant_beleived_old == 3:
                # count how many points in the left top quadrant
                for point in quadrant_trj:
                    if point[0] <= orientation_switch_position[0]+10 and point[1] <= orientation_switch_position[1]+10:
                        inside_n += 1
                    else:
                        outside_n += 1
                
            if len(quadrant_trj)>=10 and trial_id <= 3:

                if quadrant_beleived_old == 0:
                    narrow_dimension = min(abs(240-orientation_switch_position[0]), abs(240-orientation_switch_position[1]))
                elif quadrant_beleived_old == 1:
                    narrow_dimension = min(abs(0-orientation_switch_position[0]), abs(240-orientation_switch_position[1]))
                elif quadrant_beleived_old == 2:
                    narrow_dimension = min(abs(240-orientation_switch_position[0]), abs(0-orientation_switch_position[1]))
                elif quadrant_beleived_old == 3:
                    narrow_dimension = min(abs(0-orientation_switch_position[0]), abs(0-orientation_switch_position[1]))
                # print("inside_n: ", inside_n)
                # print("outside_n: ", outside_n)
                # print("narrow dimension: ", narrow_dimension)
                # print("-------------------------")

            quadrant_trj.clear()

        if quadrant_beleived_old != quadrant_beleived:
            orientation_switch_position = (20 * int(GE.batched_states[0][1])+10, 20 * int(GE.batched_states[0][0])+10)
            quadrant_beleived_old = quadrant_beleived
        if  batched_goal_reached[render_id]:
            trial_id += 1
            orientation_switch_position = (20 * int(GE.init_batched_states[0][1])+10, 20 * int(GE.init_batched_states[0][0])+10)

        """ render the env
        """
        if rpl_config.visualization == "True" or rpl_config.video_output == "True":
            img = GE.render(env_id = render_id)
            cbimg = GE.render4(env_id = render_id)
            if len(trajectory) > 1:
                for i in range(len(trajectory)-1):
                    cv2.line(img, (int(trajectory[i][1]), int(trajectory[i][0])), (int(trajectory[i+1][1]), int(trajectory[i+1][0])), (0,130,0), 2)
            # if max_sim > sim_th:
                
            #     # ego_point = (20 * int(GE.init_batched_states[0][1])+10, 20 * int(GE.init_batched_states[0][0])+10)
            #     # ego_point = (20 * int(GE.batched_states[0][1])+10, 20 * int(GE.batched_states[0][0])+10)
            #     ego_point = orientation_switch_position

            #     if quadrant_beleived == 0:
            #         # draw a transparent rectangle for the right bottom quadrant of img
            #         img1 = img.copy()
            #         cv2.rectangle(img1, (ego_point[0], ego_point[1]), (img.shape[1], img.shape[0]), (0,0,255), -1)
            #         # draw an arrow to the right bottom direction
            #         # cv2.arrowedLine(img1, (ego_point[0], ego_point[1]), (ego_point[0]+50, ego_point[1]+50), (0,0,255), 2)
            #         img = cv2.addWeighted(img1, 0.4, img, 0.6, 0)
            #     elif quadrant_beleived == 1:
            #         # draw a transparent rectangle for the left bottom quadrant of img
            #         img1 = img.copy()
            #         cv2.rectangle(img1, (ego_point[0], ego_point[1]), (0, img.shape[0]), (0,0,255), -1)
            #         # draw an arrow to the left bottom direction
            #         # cv2.arrowedLine(img1, (ego_point[0], ego_point[1]), (ego_point[0]-50, ego_point[1]+50), (0,0,255), 2)
            #         img = cv2.addWeighted(img1, 0.4, img, 0.6, 0)
            #     elif quadrant_beleived == 2:
            #         # draw a transparent rectangle for the right top quadrant of img
            #         img1 = img.copy()
            #         cv2.rectangle(img1, (ego_point[0], ego_point[1]), (img.shape[1], 0), (0,0,255), -1)
            #         # draw an arrow to the right top direction
            #         # cv2.arrowedLine(img1, (ego_point[0], ego_point[1]), (ego_point[0]+50, ego_point[1]-50), (0,0,255), 2)
            #         img = cv2.addWeighted(img1, 0.4, img, 0.6, 0)
            #     elif quadrant_beleived == 3:
            #         # draw a transparent rectangle for the left top quadrant of img
            #         img1 = img.copy()
            #         cv2.rectangle(img1, (ego_point[0], ego_point[1]), (0,0), (0,0,255), -1)
            #         # draw an arrow to the left top direction
            #         # cv2.arrowedLine(img1, (ego_point[0], ego_point[1]), (ego_point[0]-50, ego_point[1]-50), (0,0,255), 2)
            #         img = cv2.addWeighted(img1, 0.4, img, 0.6, 0)

            #     cv2.circle(img, ego_point, 7, (0,0,255), -1, cv2.LINE_AA)
            #     cv2.circle(img, ego_point, 5, (0,255,0), -1, cv2.LINE_AA)

            
            """ visualize concat_obs, rnn_state, y1
            """
            rnn_state_img = rnn_state_img0.copy()
            # visualize rnn_state on rnn_state_img as bar chart
            rnn_state0 = rnn_state[0]
            x_offset = 450
            for i in range(rnn_state0.shape[0]):
                y_color = rnn_state_window_var[i]*1000
                cv2.line(rnn_state_img, (i*neuron_interval, int(vanilla_vertical/2)), (i*neuron_interval, int(vanilla_vertical/2) + int(rnn_state0[i]*90)), (0,y_color,255), 4, cv2.LINE_AA)
            # put text "rnn_state" on the right side of rnn_state0
            cv2.putText(rnn_state_img, "rnn state", (rnn_state0.shape[0]*neuron_interval + 30, int(vanilla_vertical/2)+90), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), 2, cv2.LINE_AA)
            # # visualize static_feature on rnn_state_img as bar chart
            # for i in range(static_feature.shape[0]):
            #     y_color = rnn_state_window_var[i]*1000  # TODO: change to static_feature_window_var
            #     cv2.line(rnn_state_img, (i*neuron_interval+x_offset, int(contribution_height)), (i*neuron_interval+x_offset, int(contribution_height) + int(static_feature[i]*90)), (0,y_color,255), 4, cv2.LINE_AA)
            # # draw a rectangle around static_feature
            # cv2.rectangle(rnn_state_img, (0*neuron_interval+x_offset, int(contribution_height)-90), (static_feature.shape[0]*neuron_interval+x_offset, int(contribution_height)+90), (100,100,100), 2)
            # # put text "static_feature" on the right side of static_feature
            # cv2.putText(rnn_state_img, "static feature", (rnn_state0.shape[0]*neuron_interval + 30, int(contribution_height)+90), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), 2, cv2.LINE_AA)
            # # draw line from rnn_state0[var_clt_mean_less_than_0_1] to static_feature
            # for i in range(len(var_clt_mean_less_than_0_1)):
            #     start_x = var_clt_mean_less_than_0_1[i]*neuron_interval
            #     start_y = int(vanilla_vertical/2)+90
            #     end_x = i*neuron_interval + x_offset
            #     end_y = int(contribution_height)-90
            #     cv2.line(rnn_state_img, (start_x, start_y), (end_x, end_y), (0,0,255), 1, cv2.LINE_AA)
            # features_interval = 330
            # # visualize rnn_feature_0 on rnn_state_img as bar chart
            # x_offset = 30
            # for i in range(len(rnn_feature_0)):
            #     y_color = rnn_state_window_var[i]*1000
            #     feature_height = int(contribution_height) + 300
            #     cv2.line(rnn_state_img, (i*neuron_interval1+x_offset, feature_height), (i*neuron_interval1+x_offset, feature_height + int(rnn_feature_0[i]*60)), (0,200,0), 2, cv2.LINE_AA)
            # # draw a rectangle around rnn_feature_0
            # cv2.rectangle(rnn_state_img, (0*neuron_interval1+x_offset, feature_height-60), (len(rnn_feature_0)*neuron_interval1+x_offset, feature_height+60), (100,100,100), 2)
            # # put text "right bottom" under the rectangle
            # cv2.putText(rnn_state_img, "right bottom", (x_offset, feature_height+100), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255), 2, cv2.LINE_AA)
            # # visualize rnn_feature_1 on rnn_state_img as bar chart
            # x_offset += features_interval
            # for i in range(len(rnn_feature_1)):
            #     y_color = rnn_state_window_var[i]*1000
            #     feature_height = int(contribution_height) + 300
            #     cv2.line(rnn_state_img, (i*neuron_interval1+x_offset, feature_height), (i*neuron_interval1+x_offset, feature_height + int(rnn_feature_1[i]*60)), (0,200,0), 2, cv2.LINE_AA)
            # # draw a rectangle around rnn_feature_1
            # cv2.rectangle(rnn_state_img, (0*neuron_interval1+x_offset, feature_height-60), (len(rnn_feature_1)*neuron_interval1+x_offset, feature_height+60), (100,100,100), 2)
            # # put text "left bottom" under the rectangle
            # cv2.putText(rnn_state_img, "left bottom", (x_offset, feature_height+100), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255), 2, cv2.LINE_AA)
            # # visualize rnn_feature_2 on rnn_state_img as bar chart
            # x_offset += features_interval
            # for i in range(len(rnn_feature_2)):
            #     y_color = rnn_state_window_var[i]*1000
            #     feature_height = int(contribution_height) + 300
            #     cv2.line(rnn_state_img, (i*neuron_interval1+x_offset, feature_height), (i*neuron_interval1+x_offset, feature_height + int(rnn_feature_2[i]*60)), (0,200,0), 2, cv2.LINE_AA)
            # # draw a rectangle around rnn_feature_2
            # cv2.rectangle(rnn_state_img, (0*neuron_interval1+x_offset, feature_height-60), (len(rnn_feature_2)*neuron_interval1+x_offset, feature_height+60), (100,100,100), 2)
            # # put text "right top" under the rectangle
            # cv2.putText(rnn_state_img, "right top", (x_offset, feature_height+100), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255), 2, cv2.LINE_AA)
            # # visualize rnn_feature_3 on rnn_state_img as bar chart
            # x_offset += features_interval
            # for i in range(len(rnn_feature_3)):
            #     y_color = rnn_state_window_var[i]*1000
            #     feature_height = int(contribution_height) + 300
            #     cv2.line(rnn_state_img, (i*neuron_interval1+x_offset, feature_height), (i*neuron_interval1+x_offset, feature_height + int(rnn_feature_3[i]*60)), (0,200,0), 2, cv2.LINE_AA)
            # # draw a rectangle around rnn_feature_3
            # cv2.rectangle(rnn_state_img, (0*neuron_interval1+x_offset, feature_height-60), (len(rnn_feature_3)*neuron_interval1+x_offset, feature_height+60), (100,100,100), 2)
            # # put text "left top" under the rectangle
            # cv2.putText(rnn_state_img, "left top", (x_offset, feature_height+100), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255), 2, cv2.LINE_AA)
            # # put text "feature template" on the right side of the rectangle
            # cv2.putText(rnn_state_img, "feature template", (rnn_state0.shape[0]*neuron_interval + 30, feature_height+30), cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), 2, cv2.LINE_AA)

            # if max_sim > sim_th:
            #     if quadrant_beleived == 0:
            #         # link the static_feature to the rnn_feature_0
            #         cv2.line(rnn_state_img, (static_feature.shape[0]*neuron_interval//2+450, int(contribution_height)+90), (len(rnn_feature_0)*neuron_interval1//2+30, feature_height-60), (0,255,0), 3, cv2.LINE_AA)
            #     elif quadrant_beleived == 1:
            #         # link the static_feature to the rnn_feature_1
            #         cv2.line(rnn_state_img, (static_feature.shape[0]*neuron_interval//2+450, int(contribution_height)+90), (len(rnn_feature_1)*neuron_interval1//2+30+features_interval, feature_height-60), (0,255,0), 3, cv2.LINE_AA)
            #     elif quadrant_beleived == 2:
            #         # link the static_feature to the rnn_feature_2
            #         cv2.line(rnn_state_img, (static_feature.shape[0]*neuron_interval//2+450, int(contribution_height)+90), (len(rnn_feature_2)*neuron_interval1//2+30+features_interval*2, feature_height-60), (0,255,0), 3, cv2.LINE_AA)
            #     elif quadrant_beleived == 3:
            #         # link the static_feature to the rnn_feature_3
            #         cv2.line(rnn_state_img, (static_feature.shape[0]*neuron_interval//2+450, int(contribution_height)+90), (len(rnn_feature_3)*neuron_interval1//2+30+features_interval*3, feature_height-60), (0,255,0), 3, cv2.LINE_AA)

        trajectory.append([20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10])

        if batched_goal_reached[render_id]:
            print("len of trajectory: ", len(trajectory))
            trajectory.clear()

        """ scene display
        """
        if rpl_config.visualization == "True":

            # # cv2.imshow("img", img)
            # # cv2.imshow("rnn_state_img", rnn_state_img)
            # # scale img to match the height of rnn_state_img, and concatenate them horizontally
            # scale_ = rnn_state_img.shape[0] / img.shape[0]
            # img2 = cv2.resize(img, (int(img.shape[1]*scale_), int(img.shape[0]*scale_)))
            # img2 = np.concatenate((rnn_state_img, img2), axis=1)
            # if blind_obs:
            #     # put text blind_obs on the bottom left corner
            #     cv2.putText(img2, "blind_obs", (10, img2.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX, 5.0, (255,255,255), 2, cv2.LINE_AA)
            # cv2.imshow("rnn_state_img", img2)
            cv2.imshow("img", img)
            cv2.imshow("cbimg", cbimg)

            if rpl_config.video_output == "True":
                # wirte the rendered image to the video
                out.write(img)
                # print("write frame to video")
            
            if step_by_step:
                k = cv2.waitKey(0)
            else:
                k = cv2.waitKey(1)
            if k == ord('r'): 
                quadrant_trj = []
                rnn_state_waterfall.clear()
                rnn_state = model.initial_state(GE.num_envs)
                GE.rnd_goal_collection = get_rnd_goal_collection_vmap(GE.env_keys, GE.batched_envs, GE.width, GE.height, GE.num_free_spaces)
                GE.reset()
                if not random_task:
                    # set states of GE
                    GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
                    GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
                    # set goals of GE
                    GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
                    GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
                    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
                    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
                    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
                    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
                    concat_obs = GE.concat_obs
                    
                trajectory.clear()
            elif k == ord('q'):
                if rpl_config.video_output == "True":
                    # save video
                    out.release()
                break
            elif k == ord('t'):
                step_by_step = not step_by_step
            elif k == ord('b'):
                blind_obs = not blind_obs
                print("blind_obs: ", blind_obs)

    rnn_state_waterfall = np.array(rnn_state_waterfall)
    print("shape of rnn_state_waterfall: ", rnn_state_waterfall.shape)

    def count_non_effective_actions(traj):
        """Count timesteps where the agent did not move.

        An action is "non-effective" when the coordinates at consecutive
        timesteps are identical (the agent stayed in place).

        Args:
            traj: (T, 2) NumPy array of per-step x/y positions.

        Returns:
            A pair ``(count, NEA)`` where ``NEA`` is a length ``T-1`` boolean
            mask marking each consecutive pair of equal positions, and
            ``count`` is the total number of such repeats.
        """
        # True wherever step t and step t+1 share the same coordinates.
        NEA = np.all(traj[:-1] == traj[1:], axis=1)
        return np.sum(NEA), NEA
    print("shape of trajectories: ", np.array(trajectories).shape)
    count, NEA = count_non_effective_actions(np.array(trajectories))

    print("shape of NEA: ", NEA.shape)

    vars = [1 for i in range(20)]
    for i in range(20, rnn_state_waterfall.shape[0]):
        rnn_state_window = rnn_state_waterfall[i-20:i]
        rnn_state_window_var = np.var(rnn_state_window, axis=0)
        vars.append(np.mean(rnn_state_window_var))

    
    plt.plot(NEA)
    plt.plot(vars)
    plt.show()


            
# Script entry point: run the replay/visualization loop only when executed
# directly (not when imported as a module).
if __name__ == "__main__":
    main()