from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE


def progress_bar(current, total, barLength = 100):
    """Draw an in-place console progress bar for step `current` out of `total`."""
    percent = float(current) * 100 / total
    head = '-' * int(percent/100 * barLength - 1) + '>'
    tail = ' ' * (barLength - len(head))
    # carriage return keeps successive updates on a single console line
    print('Progress: [%s%s] %d %%' % (head, tail, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """ forward pass of the model

    JIT-compiled; `model` (argument 3) is marked static so each distinct
    model instance triggers at most one trace/compile.
    Returns whatever `model.apply` returns — call sites in this file unpack
    it as (new_recurrent_state, output).
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest logit in `y`."""
    return jnp.argmax(y)
# batched version: maps get_action over the leading (per-env) axis
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Read one task definition from a JSON file.

    The file must contain the keys "data" (landscape), "state" (start
    position) and "goal". Returns the tuple (landscape, state, goal),
    optionally echoing all three to stdout when `display` is True.
    """
    with open(pth, "r") as f:
        task = json.load(f)
    landscape = task["data"]
    state = task["state"]
    goal = task["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal

def get_task_states():

    """Roll out the trained recurrent agent on every saved task environment,
    quality-check each environment's goal-reaching record, and return the
    final hidden states of the environments that passed QC.

    CLI flags (model/task paths, network size/type, life duration, ...) are
    parsed with defaults taken from ReplayConfig.

    Returns:
        np.ndarray of shape (n_qc_pass, hidden_dims): copies of the final
        recurrent states for the QC-passing environments.
    """

    # --- parse arguments ---
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # write the parsed CLI values back onto the config object
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # load all tasks in the dir "./data/adaptive_trajectory_optimization/task_envs/" using load_task()
    landscapes, states, goals = [], [], []
    dir_path = "./data/adaptive_trajectory_optimization/task_envs/"
    file_list = os.listdir(dir_path)
    file_count = len(file_list)
    # NOTE(review): assumes the dir contains exactly the files
    # task_0.json .. task_<N-1>.json; any other file in the dir breaks the
    # index-based naming below — confirm.
    for tt in range(file_count):
        progress_bar(tt, file_count)
        # get complete path
        task_pth = dir_path + "task_" + str(tt) + ".json"
        landscape, state, goal = load_task(task_pth, display=False)

        # print(task_pth)

        # if len(landscapes) <= 20:
        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    states = jnp.array(states)
    goals = jnp.array(goals)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params (print every leaf shape for a sanity check)
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    """ create agent
    """
    # NOTE(review): if nn_type is neither "vanilla" nor "gru", `model` is
    # never bound and the rollout below raises NameError — confirm intended.
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # (Dense input = hidden state + 10-dim observation, hence nn_size + 10 rows)
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()

    # '''empty space test
    # '''
    # blank_space = [1 for _ in range(12*12)]
    # # make borders
    # for i in range(12):
    #     blank_space[i] = 0
    #     blank_space[11*12 + i] = 0
    #     blank_space[i*12] = 0
    #     blank_space[i*12 + 11] = 0
    # ''' create a batch of empty landscapes
    # '''
    # for i in range(len(landscapes)):
    #     landscapes[i] = blank_space

    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs

    rnn_state = model.initial_state(GE.num_envs)
    step_count = 0
    trajectories = []
    goal_record = []

    # roll the agent out for a fixed lifetime, recording per-step states
    # and goal-reached flags for every environment
    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        step_count += 1

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions)

        trajectories.append(np.array(GE.batched_states))
        goal_record.append(batched_goal_reached)

        # img = GE.render(4699)
        # cv2.imshow("img", img)
        # cv2.waitKey(1)

    print("shape of trajectories: ", np.array(trajectories).shape)
    print("shape of goal_record: ", np.array(goal_record).shape)
    print("shape of rnn_state: ", rnn_state.shape)

    # switch the dimension1 and dimension2 of goal_record:
    # (time, env) -> (env, time) so each row is one environment's record
    goal_record = np.array(goal_record).T
    print("shape of goal_record: ", goal_record.shape)

    def qc_single_goal_record(record):
        """Quality-check one environment's goal-reached record.

        Returns True as soon as the smoothed inter-goal interval has
        stabilized (<= var_threshold) AND the agent reaches the goal at
        least `optimization_scale` times faster than on its first trial.
        Note: trial_len_var is an exponential moving average of
        |delta trial_len|, not a statistical variance.
        """

        # var_threshold = 10
        # optimization_scale = 2

        var_threshold = 5
        optimization_scale = 2

        trial_len = 0
        trial_len_old = 0
        step_ = 0              # steps elapsed since the last goal hit
        trial_len_var = 0
        first_trial_len = -1   # -1 marks "no goal reached yet"
        avg_trial_len = 0      # currently unused
        for i in range(record.shape[0]):
            step_ += 1
            if record[i] == True:
                if first_trial_len == -1:
                    first_trial_len = step_
                trial_len = step_
                step_ = 0
                # compute the variance of trial_len
                trial_len_var = 0.4 * abs(trial_len - trial_len_old) + 0.6 * trial_len_var
                trial_len_old = trial_len
                if trial_len_var <= var_threshold and first_trial_len/trial_len >= optimization_scale:
                    return True
        return False

    # qc_test = qc_single_goal_record(goal_record[4699])
    # print("qc_test: ", qc_test)

    # partition environments into QC pass / fail index lists
    n_qc_pass = 0
    qc_pass = []
    qc_fail = []
    for i in range(goal_record.shape[0]):
        progress_bar(i, goal_record.shape[0])
        if qc_single_goal_record(goal_record[i]):
            n_qc_pass += 1
            qc_pass.append(i)
        else:
            qc_fail.append(i)

    print("shape of qc_pass: ", np.array(qc_pass).shape)
    print("shape of qc_fail: ", np.array(qc_fail).shape)

    qc_pass_goals = GE.batched_goals[jnp.array(qc_pass)].copy()
    # qc_pass_goals = GE.init_batched_states[jnp.array(qc_pass)].copy()
    # qc_pass_goals = GE.batched_goals.copy()
    print("shape of qc_pass_goals: ", qc_pass_goals.shape)

    print("qc ratio: ", n_qc_pass / goal_record.shape[0])

    # select the final hidden states of the QC-passing environments
    # (original comment, translated: "reduce the dimensionality of rnn_state")
    rnn_state_success = np.array(rnn_state)[np.array(qc_pass)].copy()

    return rnn_state_success.copy()


def get_task_states_then_converge():

    """Same rollout + QC pipeline as get_task_states(), then additionally
    let the surviving hidden states free-run on all-zero observations for
    another `life_duration` steps before returning them.

    Returns:
        np.ndarray of shape (n_qc_pass, hidden_dims): the converged
        recurrent states of the QC-passing environments.
    """

    # --- parse arguments ---
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # write the parsed CLI values back onto the config object
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # load all tasks in the dir "./data/adaptive_trajectory_optimization/task_envs/" using load_task()
    landscapes, states, goals = [], [], []
    dir_path = "./data/adaptive_trajectory_optimization/task_envs/"
    file_list = os.listdir(dir_path)
    file_count = len(file_list)
    # NOTE(review): assumes the dir contains exactly task_0.json ..
    # task_<N-1>.json — confirm (same assumption as get_task_states).
    for tt in range(file_count):
        progress_bar(tt, file_count)
        # get complete path
        task_pth = dir_path + "task_" + str(tt) + ".json"
        landscape, state, goal = load_task(task_pth, display=False)

        # print(task_pth)

        # if len(landscapes) <= 20:
        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    states = jnp.array(states)
    goals = jnp.array(goals)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params (print every leaf shape for a sanity check)
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    """ create agent
    """
    # NOTE(review): unknown nn_type leaves `model` unbound (NameError below)
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # (Dense input = hidden state + 10-dim observation, hence nn_size + 10 rows)
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()

    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs

    rnn_state = model.initial_state(GE.num_envs)
    step_count = 0
    trajectories = []
    goal_record = []

    # roll the agent out for a fixed lifetime, recording per-step states
    # and goal-reached flags for every environment
    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        step_count += 1

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions)

        trajectories.append(np.array(GE.batched_states))
        goal_record.append(batched_goal_reached)

        # img = GE.render(4699)
        # cv2.imshow("img", img)
        # cv2.waitKey(1)

    print("shape of trajectories: ", np.array(trajectories).shape)
    print("shape of goal_record: ", np.array(goal_record).shape)
    print("shape of rnn_state: ", rnn_state.shape)

    # switch the dimension1 and dimension2 of goal_record:
    # (time, env) -> (env, time) so each row is one environment's record
    goal_record = np.array(goal_record).T
    print("shape of goal_record: ", goal_record.shape)

    def qc_single_goal_record(record):
        """Quality-check one environment's goal-reached record.

        Returns True as soon as the smoothed inter-goal interval has
        stabilized (<= var_threshold) AND the agent reaches the goal at
        least `optimization_scale` times faster than on its first trial.
        Note: trial_len_var is an exponential moving average of
        |delta trial_len|, not a statistical variance.
        """

        # var_threshold = 10
        # optimization_scale = 2

        var_threshold = 5
        optimization_scale = 2

        trial_len = 0
        trial_len_old = 0
        step_ = 0              # steps elapsed since the last goal hit
        trial_len_var = 0
        first_trial_len = -1   # -1 marks "no goal reached yet"
        avg_trial_len = 0      # currently unused
        for i in range(record.shape[0]):
            step_ += 1
            if record[i] == True:
                if first_trial_len == -1:
                    first_trial_len = step_
                trial_len = step_
                step_ = 0
                # compute the variance of trial_len
                trial_len_var = 0.4 * abs(trial_len - trial_len_old) + 0.6 * trial_len_var
                trial_len_old = trial_len
                if trial_len_var <= var_threshold and first_trial_len/trial_len >= optimization_scale:
                    return True
        return False

    # qc_test = qc_single_goal_record(goal_record[4699])
    # print("qc_test: ", qc_test)

    # partition environments into QC pass / fail index lists
    n_qc_pass = 0
    qc_pass = []
    qc_fail = []
    for i in range(goal_record.shape[0]):
        progress_bar(i, goal_record.shape[0])
        if qc_single_goal_record(goal_record[i]):
            n_qc_pass += 1
            qc_pass.append(i)
        else:
            qc_fail.append(i)

    qc_pass_goals = GE.batched_goals[jnp.array(qc_pass)].copy()

    print("qc ratio: ", n_qc_pass / goal_record.shape[0])

    # let rnn_state converge: keep only QC-passing states and drive the
    # network with all-zero observations for another lifetime
    rnn_state_success = jnp.array(rnn_state)[jnp.array(qc_pass)].copy()
    # one 10-dim zero observation per surviving env — matches the
    # nn_size + 10 input width asserted above
    obs_zero = jnp.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0] for i in range(rnn_state_success.shape[0])])

    rnn_state_success1 = rnn_state_success.copy()

    for t in range(rpl_config.life_duration):
        progress_bar(t, rpl_config.life_duration)
        """ model forward 
        """
        rnn_state_success, y1 = model_forward(params, rnn_state_success, obs_zero, model)

    # how far the states drifted while free-running on zero input
    rnn_state_success_diff = rnn_state_success - rnn_state_success1
    # per-dimension mean of the drift over the batch
    rnn_state_success_diff_mean = jnp.mean(rnn_state_success_diff, axis=0)
    print("rnn_state_success_diff_mean: ", rnn_state_success_diff_mean)

    return np.array(rnn_state_success).copy()

def get_converge_states():

    """Sample random initial recurrent states and let them free-run with
    all-zero observations for `life_duration` steps; return the resulting
    (approximately converged) states.

    Returns:
        np.ndarray of shape (10000, hidden_dims).
    """

    # --- parse arguments ---
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--device", type=int, default=rpl_config.device)

    args = parser.parse_args()

    # write the parsed CLI values back onto the config object
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    rpl_config.device = args.device

    print("rpl_config.model_pth: ", rpl_config.model_pth)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params (print every leaf shape for a sanity check)
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    """ create agent
    """
    # NOTE(review): unknown nn_type leaves `model` unbound (NameError below)
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # (Dense input = hidden state + 10-dim observation, hence nn_size + 10 rows)
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    n_samples = 10000
    k1 = npr.randint(0, 1000000)   # random seed for the initial-state sampler
    rnn_state = model.initial_state_rnd(n_samples, k1)
    # The diff / norm bookkeeping below is currently unused — it feeds the
    # commented-out convergence tracking inside the loop.
    rnn_state_old = rnn_state.copy()
    diff = jnp.abs(rnn_state - rnn_state_old)
    rnn_state_old = rnn_state.copy()
    diff_norm = jnp.linalg.norm(diff, axis=1)
    diff_norm_old = diff_norm.copy()
    norm_std = diff_norm.copy()

    rnn_state_init = rnn_state.copy()

    # one 10-dim zero observation per sample
    obs_zero = jnp.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0] for i in range(n_samples)])

    # free-run the recurrence on zero input for the configured lifetime
    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        """ model forward 
        """
        rnn_state, y1 = model_forward(params, rnn_state, obs_zero, model)
        # diff = jnp.abs(rnn_state - rnn_state_old)
        # rnn_state_old = rnn_state.copy()
        # diff_norm = jnp.linalg.norm(diff, axis=1)
        # norm_std = 0.4 * jnp.abs(diff_norm - diff_norm_old) + 0.6 * norm_std
        # diff_norm_old = diff_norm.copy()


    # print(rnn_state.shape)
    # print(norm_std.shape)
    rnn_state_np = np.array(rnn_state)

    return rnn_state_np.copy()

def main():

    """Compare QC-passing task hidden states against free-running converged
    states with a 2-D t-SNE scatter plot (blue/large = success states,
    red/small = converged states).
    """

    # rnn_state_success = get_task_states()
    rnn_state_success = get_task_states_then_converge()
    rnn_state_converge = get_converge_states()
    # previously-saved reference states for comparison
    rnn_state_success1 = np.load('./data/rnn_state_success.npy')

    diff = rnn_state_success1 - rnn_state_success
    # report the shape and per-dimension mean of the difference
    print("Shape of diff: ", diff.shape)
    diff_mean = np.mean(diff, axis=0)
    print("Mean of diff: ", diff_mean)

    # NOTE(review): adding diff back makes rnn_state_success exactly equal
    # to the loaded rnn_state_success1 (since diff = success1 - success);
    # this looks like a temporary debugging substitution — confirm intent.
    rnn_state_success = rnn_state_success + diff

    # np.save('./data/rnn_state_converge.npy', rnn_state_converge)
    # np.save('./data/rnn_state_success.npy', rnn_state_success)

    # rnn_state_success = np.load('./data/rnn_state_success.npy')
    # rnn_state_converge = np.load('./data/rnn_state_converge.npy')

    # rnn_state_converge = rnn_state_success.copy()

    # rnn_state_success = rnn_state_success[:rnn_state_success.shape[0]//2].copy()

    print("shape of rnn_state_success: ", rnn_state_success.shape)
    print("shape of rnn_state_converge: ", rnn_state_converge.shape)

    # concatenate rnn_state_success and rnn_state_converge
    rnn_state_combined = np.concatenate([rnn_state_success, rnn_state_converge], axis=0)

    # run t-SNE on the concatenated data
    tsne = TSNE(n_components=2, random_state=0)
    rnn_state_tsne = tsne.fit_transform(rnn_state_combined)

    # assign each point a color and size (blue/large = success, red/small = converge)
    colors = ['b'] * rnn_state_success.shape[0] + ['r'] * rnn_state_converge.shape[0]
    sizes = [50] * rnn_state_success.shape[0] + [5] * rnn_state_converge.shape[0]

    # draw the scatter plot
    plt.scatter(rnn_state_tsne[:, 0], rnn_state_tsne[:, 1], c=colors, s=sizes)
    plt.title('RNN State t-SNE')
    plt.xlabel('Dimension 1')
    plt.ylabel('Dimension 2')
    plt.show()

    # # run t-SNE on the concatenated data
    # tsne = TSNE(n_components=2, random_state=0)
    # rnn_state_tsne = tsne.fit_transform(rnn_state_combined)
    # # draw the scatter plot
    # plt.scatter(rnn_state_tsne[:, 0], rnn_state_tsne[:, 1])
    # plt.title('RNN State t-SNE')
    # plt.xlabel('Dimension 1')
    # plt.ylabel('Dimension 2')
    # plt.show()


def main1():

    """Alternate entry point kept for backward compatibility.

    Its body was an exact line-for-line copy of main(); it now delegates to
    main() so the two cannot silently drift apart. Behavior is unchanged:
    the same rollout, state comparison, and t-SNE plot are produced.
    """
    main()
    

if __name__ == "__main__":

    # script entry point: run the success-vs-converged t-SNE comparison
    main()