from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt

# GPU ids selectable via --device (index into this list)
gpu_list = [str(i) for i in range(10)]

def progress_bar(current, total, barLength = 100):
    """Render a one-line textual progress bar to stdout, overwriting in place.

    `current` / `total` give the fraction done; `barLength` is the bar width
    in characters.
    """
    percent = current * 100.0 / total
    filled = int(percent / 100 * barLength - 1)
    arrow = '-' * filled + '>'
    padding = ' ' * (barLength - len(arrow))
    # '\r' rewinds the cursor so successive calls redraw the same line
    print('Progress: [%s%s] %d %%' % (arrow, padding, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Jitted forward pass.

    Applies `model` (a static, hashable argument so jit can specialize per
    model object) to the parameter pytree `variables`, recurrent `state`
    and input batch `x`, returning whatever `model.apply` returns.
    """
    result = model.apply(variables, state, x)
    return result

@jit
def get_action(y):
    """Greedy policy: return the index of the largest entry of `y`."""
    return y.argmax()

# batched version: one greedy action per row of model outputs
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Load one task spec (landscape grid, start state, goal) from a JSON file.

    The file must contain the keys "data", "state" and "goal". When
    `display` is true the three values are echoed to stdout.
    Returns the tuple (landscape, state, goal).
    """
    with open(pth, "r") as fh:
        payload = json.load(fh)
    landscape = payload["data"]
    state = payload["state"]
    goal = payload["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal

def render(grid, state, goal, valid = True):
    """Draw the maze as a BGR image: walls white, free cells black, the agent
    as a red dot and the goal as a dark-green dot.

    grid  : 2-D array, grid[j, i] == 1 marks a wall cell.
    state : agent position (row, col) -- assumed integer-like; TODO confirm.
    goal  : goal position (row, col).
    valid : when False, a red cross and an "invalid map" caption are overlaid.
    Returns the rendered image as a uint8 numpy array.
    """
    cell = 20  # pixel size of one grid cell
    state_x, state_y = int(state[0]), int(state[1])
    food_x, food_y = int(goal[0]), int(goal[1])

    width, height = grid.shape[0], grid.shape[1]
    img = np.zeros((width * cell, height * cell, 3), np.uint8)

    for j in range(width):
        for i in range(height):
            top_left = (i * cell, j * cell)
            bottom_right = (i * cell + cell, j * cell + cell)
            # filled cell (white wall / black floor) plus a gray border
            fill = (255, 255, 255) if grid[j, i] == 1 else (0, 0, 0)
            cv2.rectangle(img, top_left, bottom_right, fill, -1)
            cv2.rectangle(img, top_left, bottom_right, (100, 100, 100), 1)
            if j == state_x and i == state_y:
                cv2.circle(img, (i * cell + cell // 2, j * cell + cell // 2), 7, (0, 0, 255), -1, cv2.LINE_AA)

    # goal marker (the original drew this identical circle twice; once suffices)
    cv2.circle(img, (food_y * cell + cell // 2, food_x * cell + cell // 2), 7, (0, 100, 0), -1, cv2.LINE_AA)

    if not valid:
        # big red cross plus an outlined "invalid map" caption
        cv2.line(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), 5, cv2.LINE_AA)
        cv2.line(img, (0, img.shape[0]), (img.shape[1], 0), (0, 0, 255), 5, cv2.LINE_AA)
        cv2.putText(img, "invalid map", (int(img.shape[1]/2) - 100, int(img.shape[0]/2)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (200, 200, 200), 3, cv2.LINE_AA)
        cv2.putText(img, "invalid map", (int(img.shape[1]/2) - 100, int(img.shape[0]/2)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (200, 0, 0), 2, cv2.LINE_AA)
    return img

# Last mouse event recorded by the editor callback; shared between
# input_cb (producer) and the run_editor main loop (consumer).
event_type = ""
event_x = 0
event_y = 0

def run_editor(landscape, state, goal, map_size=12):
    """Interactive OpenCV editor for a maze layout.

    Mouse: middle button toggles a wall cell; left/right clicks only record
    "set_start"/"set_goal" events (not consumed by this loop).
    Keys: 'q' quits and returns the edited layout, 'r' reverts to `landscape`.

    Returns the edited layout as a flat list of length map_size*map_size,
    in the same element order as the input `landscape`.
    """
    global event_type, event_x, event_y

    # mouse callback: just record the event; the main loop below consumes it
    def input_cb(event, x, y, flags, param):
        global event_type, event_x, event_y
        if event == cv2.EVENT_MBUTTONUP:
            event_x, event_y = x, y
            event_type = "flip_space"
        elif event == cv2.EVENT_LBUTTONUP:
            event_x, event_y = x, y
            event_type = "set_start"
        elif event == cv2.EVENT_RBUTTONUP:
            event_x, event_y = x, y
            event_type = "set_goal"

    cv2.namedWindow("img", cv2.WINDOW_GUI_NORMAL)
    cv2.setMouseCallback("img", input_cb)

    grid = np.array(landscape).reshape(map_size, map_size).transpose()
    grid_size_display = 20  # must match the cell size used by render()

    while True:

        if event_type == "flip_space":
            # toggle the clicked cell between wall (1) and free (0)
            row, col = event_y // grid_size_display, event_x // grid_size_display
            grid[row, col] = 1 - grid[row, col]
            event_type = "flip_space_done"

        num_labels, labels, stats, centroids, num_freespace, landscape_img = check_num_labels(grid, map_size, map_size)
        # BUGFIX: validity must be judged on the *edited* grid, not the
        # original `landscape`, otherwise the nonzero-cell count never
        # reflects the user's edits.
        non_zeros = np.count_nonzero(grid)
        # valid iff exactly one connected free region (background + 1 label)
        # and at least 5 nonzero cells
        valid = (num_labels == 2 and non_zeros >= 5)

        img = render(grid, state, goal, valid)
        cv2.imshow("img", img)
        k = cv2.waitKey(1)
        if k == ord('q'):
            break
        elif k == ord('r'):
            # discard all edits and restore the original layout
            grid = np.array(landscape).reshape(map_size, map_size).transpose()

    # undo the transpose so the returned flat list matches the input layout
    return grid.transpose().reshape(map_size * map_size).tolist()

def qc_single_goal_record(record):
    """Quality-check one environment's goal-reached record.

    `record` is a 1-D boolean sequence over time steps where each True marks
    the completion of a trial. The check passes (returns True) as soon as the
    exponentially smoothed variation of trial lengths drops to at most
    `var_threshold` while the current trial is at least `optimization_scale`
    times shorter than the first trial -- i.e. the agent both stabilized and
    substantially shortened its path.
    """
    # the original assigned var_threshold=10 first and immediately
    # overwrote it; only the effective values are kept here
    var_threshold = 5          # max smoothed trial-length variation to pass
    optimization_scale = 2     # required first/current trial-length ratio

    trial_len_old = 0
    step_ = 0
    trial_len_var = 0.0
    first_trial_len = -1
    for i in range(record.shape[0]):
        step_ += 1
        if record[i]:
            if first_trial_len == -1:
                first_trial_len = step_
            trial_len = step_
            step_ = 0
            # exponential moving average of |delta trial length|
            trial_len_var = 0.4 * abs(trial_len - trial_len_old) + 0.6 * trial_len_var
            trial_len_old = trial_len
            if trial_len_var <= var_threshold and first_trial_len / trial_len >= optimization_scale:
                return True
    return False

def main():

    """Evaluate evolved model checkpoints on a fixed set of maze tasks.

    For each checkpoint in this device's group, the model is replayed on all
    tasks twice -- first with reward feedback, then reward-free -- and the
    pair of QC pass ratios (reward_free_sr, reward_sr) is appended to a
    per-device text file.
    """
    rpl_config = ReplayConfig()

    # command-line arguments; defaults come from ReplayConfig
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--device", type=int, default=rpl_config.device)

    args = parser.parse_args()

    # write parsed values back into the config object
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    rpl_config.device = args.device

    # pin this process to the selected GPU
    os.environ['CUDA_VISIBLE_DEVICES']=gpu_list[rpl_config.device]

    # list all file names in the ./logs/evo_log_dense directory
    log_dir = "./logs/evo_log_dense"
    file_names = os.listdir(log_dir)
    print("len(file_names): ", len(file_names))

    # truncate (or create) this device's output file
    file_path = "./logs/rf_ratio_in_evolution_"+str(rpl_config.device)+".txt"
    with open(file_path, "w") as f:
        pass

    # group the checkpoint file names by GPU card id
    # (names are assumed to end with ..._<generation>_<card_id> -- TODO confirm)
    groups = {}
    for file_name in file_names:
        # parse the card id and generation number from the file name
        parts = file_name.split("_")
        card_id = parts[-1]
        gen = int(parts[-2])
        # append the file name to its card's group
        if card_id not in groups:
            groups[card_id] = []
        groups[card_id].append((gen, file_name))

    # sort each group by generation number
    for card_id, group in groups.items():
        group.sort()
        group = [file_name for _, file_name in group]
        groups[card_id] = group
    

    # NOTE(review): the inner sort key (card-id suffix) is constant within a
    # group, so it relies on sort stability to keep the generation order; the
    # outer sorted() orders the groups lexicographically -- verify this really
    # lines up with indexing by device id below.
    groups_list = sorted([sorted(group, key=lambda x: int(x.split("_")[-1])) for group in groups.values()])

    eval_card_id = rpl_config.device
    print(groups_list[eval_card_id])

    # load all tasks in the dir "./data/adaptive_trajectory_optimization/task_envs/" using load_task()
    landscapes, states, goals = [], [], []
    dir_path = "./data/adaptive_trajectory_optimization/task_envs_rnd/"
    file_list = os.listdir(dir_path)
    file_count = len(file_list)
    for tt in range(file_count):
        progress_bar(tt, file_count)
        # get complete path
        task_pth = "./data/adaptive_trajectory_optimization/task_envs_rnd/task_" + str(tt) + ".json"
        landscape, state, goal = load_task(task_pth, display=False)

        # print(task_pth)

        # if len(landscapes) <= 20:
        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    states = jnp.array(states)
    goals = jnp.array(goals)

    # one batched environment over all tasks, reused for every checkpoint
    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=False)
    GE.reset()

    for m_i in groups_list[eval_card_id]:

        print("m_i============================================================ ", m_i)

        """ load model
        """
        # params = load_weights(rpl_config.model_pth)
        params = load_weights(log_dir+'/'+m_i)

        # get elements of params
        tree_leaves = jax.tree_util.tree_leaves(params)
        for i in range(len(tree_leaves)):
            print("shape of leaf ", i, ": ", tree_leaves[i].shape)
        
        """ create agent
        """
        if rpl_config.nn_type == "vanilla":
            model = RNN(hidden_dims = rpl_config.nn_size)
        elif rpl_config.nn_type == "gru":
            model = GRU(hidden_dims = rpl_config.nn_size)

        # check if param fits the agent
        if rpl_config.nn_type == "vanilla":
            assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

        """ create grid env
        """
        start_time = time.time()

        ''' trial 1 : reward mode
        '''
        GE.reward_free = False
        GE.reset()
        print("time taken to create envs: ", time.time() - start_time)

        # set states of GE
        GE.batched_states = states.copy()
        # set goals of GE
        GE.batched_goals = goals.copy()
        # re-derive the env's cached tensors after overwriting states/goals
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
        concat_obs = GE.concat_obs

        rnn_state = model.initial_state(GE.num_envs)
        step_count = 0
        trajectories = []
        goal_record = []

        # roll the recurrent model forward for the whole lifetime
        for t in range(rpl_config.life_duration):

            progress_bar(t, rpl_config.life_duration)

            step_count += 1

            """ model forward and step the env
            """
            rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
            batched_actions = get_action_vmap(y1)
            batched_goal_reached, concat_obs = GE.step(batched_actions)

            trajectories.append(np.array(GE.batched_states))
            goal_record.append(batched_goal_reached)

        print("shape of trajectories: ", np.array(trajectories).shape)
        print("shape of goal_record: ", np.array(goal_record).shape)

        # switch the dimension1 and dimension2 of goal_record
        goal_record = np.array(goal_record).T
        print("shape of goal_record: ", goal_record.shape)

        # QC: count envs whose trial lengths converged and shortened
        n_qc_pass = 0
        qc_pass = []
        for i in range(goal_record.shape[0]):
            progress_bar(i, goal_record.shape[0])
            if qc_single_goal_record(goal_record[i]):
                n_qc_pass += 1
                qc_pass.append(i)
        
        print("shape of qc_pass: ", np.array(qc_pass).shape)

        print("qc ratio: ", n_qc_pass / goal_record.shape[0])

        reward_sr = n_qc_pass / goal_record.shape[0]

        qc_pass0 = np.array(qc_pass)


        ''' trial 2 : reward-free mode
        '''
        # GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
        GE.reward_free = True
        GE.reset()
        # print("time taken to create envs: ", time.time() - start_time)

        # set states of GE
        GE.batched_states = states.copy()
        # set goals of GE
        GE.batched_goals = goals.copy()
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
        concat_obs = GE.concat_obs

        rnn_state = model.initial_state(GE.num_envs)
        step_count = 0
        trajectories = []
        goal_record = []

        for t in range(rpl_config.life_duration):

            progress_bar(t, rpl_config.life_duration)

            step_count += 1

            """ model forward and step the env
            """
            rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
            batched_actions = get_action_vmap(y1)
            batched_goal_reached, concat_obs = GE.step(batched_actions)

            trajectories.append(np.array(GE.batched_states))
            goal_record.append(batched_goal_reached)

        print("shape of trajectories: ", np.array(trajectories).shape)
        print("shape of goal_record: ", np.array(goal_record).shape)

        # switch the dimension1 and dimension2 of goal_record
        goal_record = np.array(goal_record).T
        print("shape of goal_record: ", goal_record.shape)

        n_qc_pass = 0
        qc_pass = []
        for i in range(goal_record.shape[0]):
            progress_bar(i, goal_record.shape[0])
            if qc_single_goal_record(goal_record[i]):
                n_qc_pass += 1
                qc_pass.append(i)

        qc_pass1 = np.array(qc_pass)
        
        print("shape of qc_pass: ", np.array(qc_pass).shape)

        print("qc ratio: ", n_qc_pass / goal_record.shape[0])
        
        reward_free_sr = n_qc_pass / goal_record.shape[0]

        # print("ratio of reward_free_sr / reward_sr: ", reward_free_sr / reward_sr)

        # append (reward_free_sr, reward_sr) to the text file, one line per checkpoint
        with open(file_path, "a") as f:
            f.write('['+f"{reward_free_sr:.6f}, {reward_sr:.6f}"+'],\n')

        # count = sum(1 for i in qc_pass0 if i in qc_pass1)
        # print("count : ", count)
            
if __name__ == "__main__":
    main()