from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Draw an in-place ASCII progress bar on stdout.

    Each call prints ``Progress: [--->   ] NN %`` terminated with a
    carriage return so the next call overwrites it on the same line.
    """
    percent = current * 100.0 / total
    filled = int(percent / 100 * barLength - 1)
    arrow = '-' * filled + '>'
    spaces = ' ' * (barLength - len(arrow))
    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')
    sys.stdout.flush()

def _model_forward_impl(variables, state, x, model):
    """Single forward pass: apply `model` with `variables` to (state, x).

    `model` is marked static below so jit compiles one trace per model
    object (the model must therefore be hashable).
    """
    return model.apply(variables, state, x)

# JIT-compile with the model held as a static argument (argument index 3).
model_forward = jax.jit(_model_forward_impl, static_argnums=(3,))

def get_action(y):
    """Greedy action selection: index of the largest entry of `y`."""
    return jnp.argmax(y)

# Compile the greedy selector and derive a batched (per-env) variant.
get_action = jit(get_action)
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Load one task definition (landscape, start state, goal) from JSON.

    Args:
        pth: path to a task JSON file with "data", "state" and "goal" keys.
        display: when True, echo the loaded fields to stdout.

    Returns:
        (landscape, state, goal) exactly as stored in the file.
    """
    # Explicit encoding so the file parses identically on every platform;
    # keep only the actual read inside the `with` block.
    with open(pth, "r", encoding="utf-8") as f:
        data = json.load(f)
    landscape = data["data"]
    state = data["state"]
    goal = data["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal

def main():


    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    arrow_length = 1
    arrow_list = np.array([[arrow_length, 0], [-arrow_length, 0], [0, arrow_length], [0, -arrow_length], [0, 0]])

    # 生成一个二进制串集合，其中每个元素都是一个长度为8的二进制串，要求这些二进制串从 000000000 遍历到 111111111
    binary_set = set()
    for i in range(256):
        binary_string = format(i, '08b')
        binary_set.add(binary_string)
    # 将 binary_set 中的 11111111 元素删除
    binary_set.remove("11111111")
    # 将 binary_set 中所有格式为 "x1x1x1x1" 的元素删除
    binary_set_bk = binary_set.copy()
    for binary in binary_set_bk:
        if binary[1] == '1' and binary[3] == '1' and binary[4] == '1' and binary[6] == '1':
            binary_set.remove(binary)
    print("binary_set: ", binary_set)
    
    # 对 binary_set 进行排序，使得其中的元素从 00000000 遍历到 11111110
    binary_list = sorted(binary_set)
    # 将 binary_list 中的每个元素进行这样的操作：在中间插入一个0，例如 "00000000" 转换为 "000000000"；然后在结尾处插入一个0，例如 "000000000" 转换为 "0000000000"
    binary_list = [i[:4] + "0" + i[4:] + "0" for i in binary_list]
    # 将 binary_list 中的元素转换为整数数组，例如 "00000000" 转换为 [0, 0, 0, 0, 0, 0, 0, 0]
    binary_list = [list(map(int, list(i))) for i in binary_list]
    binary_list = np.array(binary_list)
    # print("binary_list: ", binary_list)
    print("shape of binary_list: ", binary_list.shape)

    binary_list, arrow_list = jnp.array(binary_list), jnp.array(arrow_list)


    """ create landscape
    """
    
    # task_selected = "./logs/rf_pass_task.txt"
    # dir_path = "./data/adaptive_trajectory_optimization/task_envs/"
    # # 读入 task_selected 中的每一行，每一行是一个任务的名称
    # with open(task_selected, "r") as f:
    #     task_list = f.readlines()
    #     task_list = [task.strip() for task in task_list]
    # # 将 task_list 中的每个任务名称转换为任务的路径，加上 dir_path
    # task_list = [dir_path + task for task in task_list]
    # landscapes, states, goals = [], [], []
    # for tt in range(len(task_list)):
    #     progress_bar(tt, len(task_list))
    #     task_pth = task_list[tt]
    #     landscape, state, goal = load_task(task_pth, display=False)
    #     landscapes.append(landscape)
    #     states.append(state)
    #     goals.append(goal)
    # states = jnp.array(states)
    # goals = jnp.array(goals)

    # load all tasks in the dir "./data/adaptive_trajectory_optimization/task_envs/" using load_task()
    landscapes, states, goals = [], [], []
    dir_path = "./data/adaptive_trajectory_optimization/task_envs/"
    file_list = os.listdir(dir_path)
    file_count = len(file_list)
    for tt in range(file_count):
        progress_bar(tt, file_count)
        # get complete path
        task_pth = dir_path + "task_" + str(tt) + ".json"
        landscape, state, goal = load_task(task_pth, display=False)
        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)
    states = jnp.array(states)
    goals = jnp.array(goals)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs

    rnn_state = model.initial_state(GE.num_envs)

    @jit
    def swap_xy(task_vector):
        return jnp.array([task_vector[1], task_vector[0]])

    @jit
    def action_distribution(rnn_state, binary_list):
        # compute actions
        def compute_action(binary, rnn_state):
            _, action_output = model_forward(params, rnn_state, binary, model)
            action = jnp.argmax(action_output)

            # 检查 action 方向是否有障碍物
            blocked = jnp.array([0, 0])
            blocked = jnp.where((action == 0) & (binary[5] == 1), jnp.array([1, 0]), blocked)
            blocked = jnp.where((action == 1) & (binary[3] == 1), jnp.array([0, 1]), blocked)
            blocked = jnp.where((action == 2) & (binary[7] == 1), jnp.array([-1, 0]), blocked)
            blocked = jnp.where((action == 3) & (binary[1] == 1), jnp.array([0, -1]), blocked)

            direction = arrow_list[action]
            direction = jnp.where(blocked[0] == 1, jnp.array([0, direction[1]]), direction)
            direction = jnp.where(blocked[1] == 1, jnp.array([direction[0], 0]), direction)

            return direction

        directions = vmap(compute_action, in_axes=(0, None))(binary_list, rnn_state)
        # 计算 directions 的均值
        mean_direction = jnp.mean(directions, axis=0)
        return mean_direction, directions
    
    action_distribution_vmap = jax.vmap(action_distribution, in_axes=(0, None))
        
    goal_record = []
    direction_mean_log = []
    position_log = []

    initial_states = GE.init_batched_states
    # 将 task_vectors 的第二个维度的所有元素互相交换，例如 task_vectors[i,0] 和 task_vectors[i,1] 互相交换
    initial_states = vmap(swap_xy)(initial_states)

    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)

        # 测量方向表征
        # j_rnn_state = jnp.array(rnn_state)
        direction_mean, directions = action_distribution_vmap(rnn_state, binary_list)

        direction_mean_log.append(direction_mean)
        position_log.append(vmap(swap_xy)(GE.batched_states)-initial_states)    # 记录对齐到初始位置的位置

        batched_actions = get_action_vmap(y1)

        batched_goal_reached, concat_obs = GE.step(batched_actions)

        goal_record.append(batched_goal_reached)

    goal_record = np.array(goal_record).T
    direction_mean_log = np.array(direction_mean_log)
    position_log = np.array(position_log)
    # 交换 direction_mean_log 的第一、二个维度
    direction_mean_log = np.swapaxes(direction_mean_log, 0, 1)
    # 交换 position_log 的第一、二个维度
    position_log = np.swapaxes(position_log, 0, 1)
    print("goal_record.shape: ", goal_record.shape)
    print("direction_mean_log.shape: ", direction_mean_log.shape)
    print("position_log.shape: ", position_log.shape)

    def qc_single_goal_record(record):

        var_threshold = 1
        optimization_scale = 2

        trial_len = 0
        trial_len_old = 0
        step_ = 0
        trial_len_var = 0
        first_trial_len = -1
        qc_pass = False
        for i in range(record.shape[0]):
            step_ += 1
            if record[i] == True:
                if first_trial_len == -1:
                    first_trial_len = step_
                trial_len = step_
                step_ = 0
                # compute the variance of trial_len
                trial_len_var = 0.9 * abs(trial_len - trial_len_old) + 0.1 * trial_len_var
                trial_len_old = trial_len
                if trial_len_var <= var_threshold and first_trial_len/trial_len >= optimization_scale:
                    qc_pass = True
                else:
                    qc_pass = False
        if qc_pass:
            return True, trial_len_var
        else:
            return False, 100000


    n_qc_pass = 0
    qc_pass = []
    qc_fail = []
    trial_len_vars = []
    for i in range(goal_record.shape[0]):
        progress_bar(i, goal_record.shape[0])
        qc_true, trial_len_var = qc_single_goal_record(goal_record[i])
        if qc_true:
            n_qc_pass += 1
            qc_pass.append(i)
            trial_len_vars.append(trial_len_var)
        else:
            qc_fail.append(i)
    
    print("shape of qc_pass: ", np.array(qc_pass).shape)
    print("shape of qc_fail: ", np.array(qc_fail).shape)

    initial_states = GE.init_batched_states
    goal_states = GE.batched_goals
    task_vectors = goal_states - initial_states
    task_vectors = vmap(swap_xy)(task_vectors)
    task_vectors = task_vectors[np.array(qc_pass)]

    # ---------------------------------------------------------------------------
    # ---------------------------------------------------------------------------
    # ---------------------------------------------------------------------------
    # ---------------------------------------------------------------------------

    # 创建一个 24*24 的数组
    box_size = 19
    grid_counter = np.zeros((box_size, box_size))
    box_size = box_size//2

    for v_i in range(task_vectors.shape[0]):
        progress_bar(v_i, task_vectors.shape[0])
        x, y = task_vectors[v_i, 0], task_vectors[v_i, 1]
        grid_counter[x+box_size, y+box_size] += 1

    print("sum of grid_counter: ", np.sum(grid_counter))

    # grid_counter = np.flip(grid_counter, axis=0)

    # 将 grid_counter 绘制成图像，用plot显示
    plt.imshow(grid_counter)
    plt.show()


# Script entry point: run the replay/analysis pipeline.
if __name__ == "__main__":
    main()