from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Render a one-line text progress bar on stdout.

    Each call overwrites the same terminal line (the print ends with a
    carriage return, no newline).

    Args:
        current: number of completed steps.
        total: total number of steps. Guarded against 0 so the bar shows
            100% instead of raising ZeroDivisionError.
        barLength: width of the bar body in characters.
    """
    percent = float(current) * 100 / total if total else 100.0
    # The '>' head takes one slot; clamp the shaft width at 0 so a 0% bar
    # does not rely on Python's silent negative-repeat behavior.
    arrow = '-' * max(int(percent / 100 * barLength - 1), 0) + '>'
    spaces = ' ' * max(barLength - len(arrow), 0)

    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')
    sys.stdout.flush()

def _model_forward_impl(variables, state, x, model):
    """Run one forward pass of ``model``.

    Args:
        variables: model parameters passed to ``model.apply``.
        state: recurrent state passed through to ``model.apply``.
        x: input batch.
        model: the network object; treated as a compile-time constant.

    Returns:
        Whatever ``model.apply(variables, state, x)`` returns.
    """
    return model.apply(variables, state, x)

# JIT-compile with the model object (argument index 3) marked static,
# since it is a Python object rather than a traceable array.
model_forward = jax.jit(_model_forward_impl, static_argnums=(3,))

def _greedy_action(y):
    """Greedy policy: index of the largest logit in ``y``."""
    return jnp.argmax(y)

# Compiled single-sample version and its batched (vmapped) counterpart.
get_action = jit(_greedy_action)
get_action_vmap = jax.vmap(get_action)

def load_task(pth = "./logs/task.json"):
    """Load one task description from a JSON file.

    Args:
        pth: path to the task JSON file.

    Returns:
        Tuple ``(landscape, state, goal)`` read from the file's
        ``"data"``, ``"state"`` and ``"goal"`` keys.
    """
    with open(pth, "r") as task_file:
        task = json.load(task_file)
    return task["data"], task["state"], task["goal"]

def main():

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))

    task_selected = "./logs/rf_pass_task.txt"
    dir_path = "./data/adaptive_trajectory_optimization/task_envs/"
    # 读入 task_selected 中的每一行，每一行是一个任务的名称
    with open(task_selected, "r") as f:
        task_list = f.readlines()
        task_list = [task.strip() for task in task_list]
    # 将 task_list 中的每个任务名称转换为任务的路径，加上 dir_path
    task_list = [dir_path + task for task in task_list]
    landscapes, states, goals = [], [], []
    for tt in range(len(task_list)):
        progress_bar(tt, len(task_list))
        task_pth = task_list[tt]
        landscape, state, goal = load_task(task_pth)
        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    states = jnp.array(states)
    goals = jnp.array(goals)


    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    action_list = np.array([[0, 1], [0, -1], [1, 0], [-1, 0], [0, 0]])

    arrow_length = 1
    arrow_list = np.array([[arrow_length, 0], [-arrow_length, 0], [0, arrow_length], [0, -arrow_length], [0, 0]])

    # 生成一个二进制串集合，其中每个元素都是一个长度为8的二进制串，要求这些二进制串从 000000000 遍历到 111111111
    binary_set = set()
    for i in range(256):
        binary_string = format(i, '08b')
        binary_set.add(binary_string)
    # 将 binary_set 中的 11111111 元素删除
    binary_set.remove("11111111")
    # 将 binary_set 中所有格式为 "x1x1x1x1" 的元素删除
    binary_set_bk = binary_set.copy()
    for binary in binary_set_bk:
        if binary[1] == '1' and binary[3] == '1' and binary[4] == '1' and binary[6] == '1':
            binary_set.remove(binary)
    print("binary_set: ", binary_set)
    
    # 对 binary_set 进行排序，使得其中的元素从 00000000 遍历到 11111110
    binary_list = sorted(binary_set)
    # 将 binary_list 中的每个元素进行这样的操作：在中间插入一个0，例如 "00000000" 转换为 "000000000"；然后在结尾处插入一个0，例如 "000000000" 转换为 "0000000000"
    binary_list = [i[:4] + "0" + i[4:] + "0" for i in binary_list]
    # 将 binary_list 中的元素转换为整数数组，例如 "00000000" 转换为 [0, 0, 0, 0, 0, 0, 0, 0]
    binary_list = [list(map(int, list(i))) for i in binary_list]
    binary_list = np.array(binary_list)
    # print("binary_list: ", binary_list)
    print("shape of binary_list: ", binary_list.shape)

    binary_list, arrow_list = jnp.array(binary_list), jnp.array(arrow_list)


    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = [landscapes[0]], width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)


    for l_i in range(len(landscapes)):

        progress_bar(l_i, len(landscapes))

        landscape = [landscapes[l_i]]
        state = states[l_i]
        goal = goals[l_i]

        """ create landscape
        """
        GE.set_landscapes(landscapes = landscape)
        GE.reset()

        """ create agent
        """
        if rpl_config.nn_type == "vanilla":
            model = RNN(hidden_dims = rpl_config.nn_size)
        elif rpl_config.nn_type == "gru":
            model = GRU(hidden_dims = rpl_config.nn_size)

        # check if param fits the agent
        if rpl_config.nn_type == "vanilla":
            assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

        # set states of GE
        GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
        GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
        # set goals of GE
        GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
        GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)

        concat_obs = GE.concat_obs

        rnn_state = model.initial_state(GE.num_envs)

        step_count = 0
        render_id = 0

        trajectory = []

        reset_ = True

        first_arrival = 0

        @jit
        def action_distribution(rnn_state, binary_list):
            # compute actions
            def compute_action(binary, rnn_state):
                _, action_output = model_forward(params, rnn_state, binary, model)
                action = jnp.argmax(action_output)

                # 检查 action 方向是否有障碍物
                blocked = jnp.array([0, 0])
                blocked = jnp.where((action == 0) & (binary[5] == 1), jnp.array([1, 0]), blocked)
                blocked = jnp.where((action == 1) & (binary[3] == 1), jnp.array([0, 1]), blocked)
                blocked = jnp.where((action == 2) & (binary[7] == 1), jnp.array([-1, 0]), blocked)
                blocked = jnp.where((action == 3) & (binary[1] == 1), jnp.array([0, -1]), blocked)

                direction = arrow_list[action]
                direction = jnp.where(blocked[0] == 1, jnp.array([0, direction[1]]), direction)
                direction = jnp.where(blocked[1] == 1, jnp.array([direction[0], 0]), direction)

                return direction

            directions = vmap(compute_action, in_axes=(0, None))(binary_list, rnn_state[0])
            # 计算 directions 的均值
            mean_direction = jnp.mean(directions, axis=0)
            return mean_direction, directions

        direction_mean_lpf = np.zeros((2,))

        goal_record = []

        for t in range(rpl_config.life_duration):

            # progress_bar(t, rpl_config.life_duration)

            step_count += 1

            """ model forward and step the env
            """
            rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)

            j_rnn_state = jnp.array(rnn_state)
            direction_mean, directions = action_distribution(j_rnn_state, binary_list)
            direction_mean_lpf = 0.95 * direction_mean_lpf + 0.05 * direction_mean

            batched_actions = get_action_vmap(y1)
            batched_goal_reached, concat_obs = GE.step(batched_actions, reset = reset_)

            trajectory.append([20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10])
            goal_record.append(batched_goal_reached)

            if batched_goal_reached[render_id]:
                # print(len(trajectory))
                trajectory.clear()
            
            if batched_goal_reached[render_id] and first_arrival == 0:
                first_arrival = 1

        goal_record = np.array(goal_record).T

        def qc_single_goal_record(record):
            var_threshold = 1
            optimization_scale = 2
            trial_len = 0
            trial_len_old = 0
            step_ = 0
            trial_len_var = 0
            first_trial_len = -1
            qc_pass = False
            for i in range(record.shape[0]):
                step_ += 1
                if record[i] == True:
                    if first_trial_len == -1:
                        first_trial_len = step_
                    trial_len = step_
                    step_ = 0
                    # compute the variance of trial_len
                    trial_len_var = 0.9 * abs(trial_len - trial_len_old) + 0.1 * trial_len_var
                    trial_len_old = trial_len
                    if trial_len_var <= var_threshold and first_trial_len/trial_len >= optimization_scale:
                        qc_pass = True
                    else:
                        qc_pass = False
            if qc_pass:
                return True, trial_len_var
            else:
                return False, 100000
            
        qc_true, trial_len_var = qc_single_goal_record(goal_record[0])
        print("qc_true: ", qc_true)

        if qc_true:

            print(task_list[l_i])

            initial_state = GE.init_batched_states[render_id] * 20 + 10
            initial_state = np.array([initial_state[1], initial_state[0]])
            goal_state = GE.batched_goals[render_id] * 20 + 10
            goal_state = np.array([goal_state[1], goal_state[0]])
            task_vector = goal_state - initial_state
            task_vector = task_vector / np.linalg.norm(task_vector)
            action_distribution_A_vector = direction_mean_lpf
            action_distribution_A_vector = action_distribution_A_vector / np.linalg.norm(action_distribution_A_vector)

            # 计算 action_distribution_A_vector 和 task_vector 的相似度
            similarity_A = np.dot(action_distribution_A_vector, task_vector)

            print("similarity_A: ", similarity_A)
            

# Script entry point: run the replay/QC pipeline when executed directly.
if __name__ == "__main__":
    main()