from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Render a single-line textual progress bar on stdout.

    Args:
        current: number of completed items (0 <= current <= total).
        total: total number of items; when 0 or negative the call is a
            no-op (previously this raised ZeroDivisionError).
        barLength: character width of the bar.

    The bar is redrawn in place using a carriage return, so callers
    should print a newline once the loop finishes.
    """
    if total <= 0:
        # Nothing to report (e.g. empty work list); avoid dividing by zero.
        return
    percent = float(current) * 100 / total
    # '-' * -1 degrades gracefully to '' when percent is near zero.
    arrow = '-' * int(percent / 100 * barLength - 1) + '>'
    spaces = ' ' * (barLength - len(arrow))

    # flush=True replaces the former explicit sys.stdout.flush() call.
    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r', flush=True)

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """ forward pass of the model

    JIT-compiled with ``model`` (argument index 3) marked static, so each
    distinct model object triggers its own compilation and the Python
    object can be used inside the traced function.

    Args:
        variables: parameter pytree passed straight to ``model.apply``.
        state: recurrent state threaded through the network.
        x: batched observation input.
        model: module exposing ``apply(variables, state, x)`` (static).

    Returns:
        Whatever ``model.apply`` returns — at the call site in ``main`` it
        is unpacked as a ``(new_state, output)`` pair.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest entry of ``y``."""
    best_index = jnp.argmax(y)
    return best_index

# Batched variant: selects the argmax action independently for each row.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    # open json file
    with open(pth, "r") as f:
        data = json.load(f)
        landscape = data["data"]
        state = data["state"]
        goal = data["goal"]
        if display:
            print("state: ", state)
            print("goal: ", goal)
            print("landscape: ", landscape)
    return landscape, state, goal


def main():
    """Generate diverse observation-sequence data with a trained agent.

    Pipeline:
      1. Load every task (landscape / start state / goal) from
         ``./data/adaptive_trajectory_optimization/task_envs/``.
      2. Roll the trained recurrent policy (vanilla RNN or GRU) through the
         batched grid environments for ``life_duration`` steps, recording
         states, observations and actions.
      3. Slice each trajectory into fixed-length "tracelets" that pass the
         quality filters (length, endpoint displacement, no stalled steps,
         not an axis-aligned straight line).
      4. Random-search for the most mutually diverse subset of tracelets.
      5. Replay each selected tracelet in ``n_samples`` freshly generated
         mazes to collect varied observation sequences, then save everything
         to an ``.npz`` archive under ``./logs/``.
    """

    # Defaults; all three are overwritten by the parsed CLI arguments below.
    seq_len = 15
    redundancy = 5
    diverse_set_capacity = 5

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    # NOTE(review): CLI defaults for --seq_len/--redundancy (8/3) differ from
    # the local defaults above (15/5); the CLI values always win since they
    # are re-assigned below.
    parser.add_argument("--seq_len", type=int, default=8)
    parser.add_argument("--redundancy", type=int, default=3)
    parser.add_argument("--diverse_set_capacity", type=int, default=5)
    parser.add_argument('--gpu_id', type=int, default=0)


    args = parser.parse_args()

    # Copy the parsed values back into the shared config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    redundancy = args.redundancy
    seq_len = args.seq_len
    diverse_set_capacity = args.diverse_set_capacity

    # Restrict CUDA to the requested GPU.
    # NOTE(review): CUDA_VISIBLE_DEVICES is set after jax has already been
    # imported at module load time, which may be too late to affect device
    # selection — confirm this actually pins the GPU in this setup.
    gpu_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    gpu_id = gpu_list[args.gpu_id]
    os.environ['CUDA_VISIBLE_DEVICES']=gpu_id

    print("redun: ", redundancy)
    print("seq_len", seq_len)

    # Normalize the network-type tag; it is reused in the output filename.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    """ load task
    """
    # load all tasks in the dir "./data/adaptive_trajectory_optimization/task_envs/" using load_task()
    landscapes, states, goals = [], [], []
    dir_path = "./data/adaptive_trajectory_optimization/task_envs/"
    file_list = os.listdir(dir_path)
    file_count = len(file_list)
    # NOTE(review): assumes the directory contains exactly task_0.json ..
    # task_{N-1}.json and nothing else — any extra file breaks the indexing.
    for tt in range(file_count):
        progress_bar(tt, file_count)
        # get complete path
        task_pth = dir_path + "task_" + str(tt) + ".json"
        landscape, state, goal = load_task(task_pth, display=False)

        # print(task_pth)

        # if len(landscapes) <= 20:
        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    num_envs = len(landscapes)

    states = jnp.array(states)
    goals = jnp.array(goals)

    print("shape of states: ", states.shape)
    print("shape of goals: ", goals.shape)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)
    
    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs
    
    # Unit movement vectors (right, left, up, down, stay).
    # NOTE(review): arrow_list is built but never used later in this
    # function — presumably kept for parity with sibling scripts.
    arrow_length = 1
    arrow_list = np.array([[arrow_length, 0], [-arrow_length, 0], [0, arrow_length], [0, -arrow_length], [0, 0]])

    # Build the set of all length-8 binary strings ("00000000" .. "11111111").
    binary_set = set()
    for i in range(256):
        binary_string = format(i, '08b')
        binary_set.add(binary_string)
    # Remove the all-ones string "11111111".
    binary_set.remove("11111111")
    # Remove every string with '1' at indices 1, 3, 4 and 6 (iterate over a
    # copy so elements can be deleted from the live set).
    binary_set_bk = binary_set.copy()
    for binary in binary_set_bk:
        if binary[1] == '1' and binary[3] == '1' and binary[4] == '1' and binary[6] == '1':
            binary_set.remove(binary)
    print("binary_set: ", binary_set)
    
    # Sort the surviving strings lexicographically ("00000000" .. "11111110").
    binary_list = sorted(binary_set)
    # Insert a '0' after the 4th character and append a trailing '0',
    # e.g. "00000000" -> "0000" + "0" + "0000" + "0" (length 10).
    binary_list = [i[:4] + "0" + i[4:] + "0" for i in binary_list]
    # Convert each string to a list of ints, e.g. "0100..." -> [0, 1, 0, 0, ...].
    binary_list = [list(map(int, list(i))) for i in binary_list]
    binary_list = np.array(binary_list)
    print("shape of binary_list: ", binary_list.shape)

    # NOTE(review): binary_list is not referenced again below — presumably a
    # leftover from a related analysis script.
    binary_list, arrow_list = np.array(binary_list), np.array(arrow_list)

    # Rollout buffers (time-major; swapped to env-major after the loop).
    trajectories = []
    goal_reached = []    # NOTE(review): never filled — leftover buffer.
    obs_record = []
    neural_states = []   # NOTE(review): never filled — leftover buffer.
    action_record = []

    rnn_state = model.initial_state(GE.num_envs)

    rkey = jax.random.PRNGKey(np.random.randint(0, 1000000))

    # Roll the policy through all environments, recording every step.
    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=False)

        trajectories.append(np.array(GE.batched_states))
        obs_record.append(np.array(concat_obs))
        action_record.append(np.array(batched_actions))

    trajectories = np.array(trajectories)
    obs_record = np.array(obs_record)
    action_record = np.array(action_record)

    # Reorder from (time, env, ...) to (env, time, ...).
    trajectories = np.swapaxes(trajectories, 0, 1)
    obs_record = np.swapaxes(obs_record, 0, 1)
    action_record = np.swapaxes(action_record, 0, 1)

    print("shape of trajectories: ", trajectories.shape)
    print("shape of obs_record: ", obs_record.shape)
    print("shape of action_record: ", action_record.shape)

    # (disabled) dump goal_reached / trajectories / obs_record / neural_states
    # np.save("./logs/goal_reached.npy", goal_reached)
    # np.save("./logs/trajectories.npy", trajectories)
    # np.save("./logs/obs_record.npy", obs_record)
    # np.save("./logs/neural_states.npy", neural_states)

    # Filter trajectory slices ("tracelets") that satisfy (translation of the
    # banner below):
    #   1. the slice is exactly seq_len steps long
    #   2. endpoint displacement criterion parameterized by redundancy
    #   3. no repeated positions along the path (no wasted actions)
    """ 从 trajectories 中筛选符合标准的路径片段
        1. 路径的长度等于 seq_len
        2. 起点到终点的hamming距离不小于 seq_len/redundancy
        3. 路径上没有重复位置的点(没有无效action)
    """
    tracelet_slices = []
    obs_slices = []
    action_slices = []
    for t in range(trajectories.shape[0]):
        
        progress_bar(t, trajectories.shape[0])

        qc_pass = False
        tracelet = trajectories[t]
        obs = obs_record[t]
        action = action_record[t]
        # 1. slide a window of exactly seq_len steps along the trajectory
        for i in range(tracelet.shape[0]-seq_len+1):
            tracelet_slice = tracelet[i:i+seq_len]
            obs_slice = obs[i:i+seq_len]
            action_slice = action[i:i+seq_len]
            # 2. endpoint-displacement criterion; the two branches differ only
            #    in requiring == redundancy (strict) vs >= redundancy (loose).
            # NOTE(review): the threshold 9*2 = 18 on seq_len - redundancy is
            # unexplained — confirm its intent before changing either branch.
            if seq_len - redundancy < 9*2:
                if seq_len - np.sum(np.abs(tracelet_slice[0] - tracelet_slice[-1])) == redundancy:
                    # 3. no two consecutive points coincide (no wasted action)
                    NEA = False
                    for j in range(seq_len-1):
                        if np.sum(np.abs(tracelet_slice[j] - tracelet_slice[j+1])) == 0:
                            NEA = True
                            break
                    # 4. reject slices whose endpoints share a row or column
                    #    (degenerate horizontal / vertical straight lines)
                    if np.abs(tracelet_slice[0][0] - tracelet_slice[-1][0]) == 0:
                        NEA = True
                    if np.abs(tracelet_slice[0][1] - tracelet_slice[-1][1]) == 0:
                        NEA = True

                    if not NEA:
                        qc_pass = True
                        break
            else:
                if seq_len - np.sum(np.abs(tracelet_slice[0] - tracelet_slice[-1])) >= redundancy:
                    # 3. no two consecutive points coincide (no wasted action)
                    NEA = False
                    for j in range(seq_len-1):
                        if np.sum(np.abs(tracelet_slice[j] - tracelet_slice[j+1])) == 0:
                            NEA = True
                            break
                    # 4. reject slices whose endpoints share a row or column
                    #    (degenerate horizontal / vertical straight lines)
                    if np.abs(tracelet_slice[0][0] - tracelet_slice[-1][0]) == 0:
                        NEA = True
                    if np.abs(tracelet_slice[0][1] - tracelet_slice[-1][1]) == 0:
                        NEA = True

                    if not NEA:
                        qc_pass = True
                        break

        # keep at most one slice (the first that passes QC) per trajectory
        if qc_pass:
            tracelet_slices.append(tracelet_slice)
            obs_slices.append(obs_slice)
            action_slices.append(action_slice)

    tracelet_slices = np.array(tracelet_slices)
    obs_slices = np.array(obs_slices)
    action_slices = np.array(action_slices)

    # (disabled) save tracelet_slices / obs_slices / action_slices to .npy
    # slice_fn = "_" + nn_type + "_" + str(seq_len)
    # np.save("./logs/tracelet_slices"+slice_fn+".npy", tracelet_slices)
    # np.save("./logs/obs_slices"+slice_fn+".npy", obs_slices)
    # np.save("./logs/action_slices"+slice_fn+".npy", action_slices)

    print("shape of tracelet_slices: ", tracelet_slices.shape)
    print("shape of obs_slices: ", obs_slices.shape)
    print("shape of action_slices: ", action_slices.shape)

    def calculate_total_difference(traj1, traj2):
        # Sum of per-step Euclidean distances between two equal-length paths.
        return np.sum(np.linalg.norm(traj1 - traj2, axis=1))

    def find_most_diverse_set(tracelet_slices):
        """Random search: sample 10000 candidate subsets of size
        ``diverse_set_capacity`` and return the index array of the subset
        whose pairwise (origin-normalized) path differences sum highest."""

        # Translate every slice so its first point sits at the origin,
        # making the diversity measure translation-invariant.
        tracelet_slices_reg = tracelet_slices.copy()
        for i in range(tracelet_slices_reg.shape[0]):
            tracelet_slices_reg[i] = tracelet_slices[i] - tracelet_slices[i][0]

        num_trajectories = tracelet_slices_reg.shape[0]
        selected_sets = []

        for p in range(10000):
            progress_bar(p, 10000)
            indices = np.random.choice(num_trajectories, size=diverse_set_capacity, replace=False)
            total_difference = 0

            # Accumulate pairwise differences within the candidate subset.
            for i in range(diverse_set_capacity):
                for j in range(i+1, diverse_set_capacity):
                    total_difference += calculate_total_difference(tracelet_slices_reg[indices[i],:,:], tracelet_slices_reg[indices[j],:,:])

            selected_sets.append((indices, total_difference))

        # Highest total difference first; take the winner.
        selected_sets.sort(key=lambda x: x[1], reverse=True)
        most_diverse_set = selected_sets[0][0]

        return most_diverse_set

    most_diverse_set = find_most_diverse_set(tracelet_slices)

    # (disabled) plot every selected tracelet slice in one window
    # fig, ax = plt.subplots()
    # for i in range(len(most_diverse_set)):
    #     idx = most_diverse_set[i]
    #     ax.plot(tracelet_slices[idx][:, 0]-tracelet_slices[idx][0,0], tracelet_slices[idx][:, 1]-tracelet_slices[idx][0,1], label=str(i))
    # ax.legend()
    # plt.show()

    diverse_set_trajectoies = tracelet_slices[most_diverse_set]
    diverse_set_trajectoies = np.array(diverse_set_trajectoies)
    print("shape of diverse_set_trajectoies: ", diverse_set_trajectoies.shape)

    diverse_set_actions = action_slices[most_diverse_set]
    diverse_set_actions = np.array(diverse_set_actions)
    print("shape of diverse_set_actions: ", diverse_set_actions.shape)

    # For each selected trajectory, generate n_samples legal obs sequences,
    # each produced by replaying the path in a different random environment.
    n_samples = 1000
    obs_data = []
    for i in range(diverse_set_trajectoies.shape[0]):

        progress_bar(i, diverse_set_trajectoies.shape[0])

        """ create grid env
        """
        # landscape = generate_maze_pool(num_mazes = n_samples, width = 10, height = 10, weight=0.4)
        # landscape = padding_landscapes0(landscape, width=10+2, height=10+2)
        # NOTE(review): mazes are generated at 12x12 and then "padded" to
        # 10+2 = 12 — presumably a no-op; confirm against the commented-out
        # 10x10 variant above.
        landscape = generate_maze_pool_rnd(num_mazes = n_samples, width = 12, height = 12, weight=0.5)
        landscape = padding_landscapes0(landscape, width=10+2, height=10+2)
        GE = GridEnv(landscapes = landscape, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
        GE.reset()

        tracelet = diverse_set_trajectoies[i]

        @jax.jit
        def set_pixel(env_map, state, value):
            """ set pixel value in env_map
            """
            env_map = env_map.at[state[0], state[1]].set(value)
            return env_map
        # vmapped over the env axis only; the same (state, value) pair is
        # applied to every environment in the batch.
        set_pixel_vmap = jax.vmap(set_pixel, in_axes=(0, None, None))

        # Carve the tracelet into every environment: mark each visited cell
        # as free space (value 1) so the replayed path is always legal.
        # NOTE(review): this loop reuses ``i``, shadowing the outer trajectory
        # index — iteration is unaffected (the outer for drives its own
        # iterator), but renaming would be clearer.
        for i in range(tracelet.shape[0]):
            position = tracelet[i]
            GE.batched_envs = set_pixel_vmap(GE.batched_envs, position, 1)

        # print("tracelet.shape: ", tracelet.shape)

        # Collect the observation sequence along the whole path for every env.
        obs_seq = []
        for i in range(tracelet.shape[0]):
            # zero array shaped like GE.batched_states
            states = jnp.zeros_like(GE.batched_states)
            # broadcast the tracelet position at this step into every env state
            states = states.at[:,0].set(tracelet[i][0])
            states = states.at[:,1].set(tracelet[i][1])
            # overwrite the batched env states
            GE.batched_states = jnp.copy(states)
            # fetch the (reward-free) ideal observation for this position
            obs_state = get_ideal_obs_vmap_rf(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
            obs_seq.append(np.array(obs_state))

        obs_seq = np.array(obs_seq)
        # swap axes: (time, env, ...) -> (env, time, ...)
        obs_seq = np.swapaxes(obs_seq, 0, 1)
        # print("obs_seq.shape: ", obs_seq.shape)
        obs_data.append(obs_seq)

    obs_data = np.array(obs_data)

    # Save all generated data into a single .npz archive under ./logs/.
    file_name = "obs_data_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
    np.savez("./logs/" + file_name,
             obs_data=obs_data, 
             diverse_set_trajectoies=diverse_set_trajectoies, 
             diverse_set_actions=diverse_set_actions, 
             )




if __name__ == "__main__":
    # Script entry point: run the full data-generation pipeline.
    main()