from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Print an in-place (carriage-return) ASCII progress bar.

    current / total gives the fraction complete; barLength is the number of
    character cells between the brackets.
    """
    done_pct = current * 100.0 / total
    fill = '-' * int(done_pct / 100 * barLength - 1) + '>'
    pad = ' ' * (barLength - len(fill))
    print(f'Progress: [{fill}{pad}] {int(done_pct)} %', end='\r')
    sys.stdout.flush()

def _model_forward(variables, state, x, model):
    """ forward pass of the model: apply `model` to input x with the
    given variables and state.
    """
    return model.apply(variables, state, x)
# JIT-compiled entry point; the model (argument index 3) is a static,
# hashable compile-time argument rather than a traced value.
model_forward = jax.jit(_model_forward, static_argnums=(3,))

@jit
def get_action(y):
    """Greedy policy: return the index of the largest entry in y."""
    best_idx = jnp.argmax(y)
    return best_idx

# Batched variant: one greedy action per row along the leading axis of y.
get_action_vmap = jax.vmap(get_action)

# Load a saved task (landscape, start state, goal) from disk.
def load_task(pth = "./logs/task.json", display = True):
    """Read a task description from a JSON file.

    Args:
        pth: path to the JSON file; expected keys are "data" (the landscape
            grid), "state" (start position) and "goal".
        display: when True, echo the loaded fields to stdout.

    Returns:
        (landscape, state, goal) exactly as stored in the file.
    """
    with open(pth, "r") as task_file:
        payload = json.load(task_file)
    landscape = payload["data"]
    state = payload["state"]
    goal = payload["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal


def main():
    """Pipeline: generate a pool of maze environments, carve a pre-recorded
    trajectory ("tracelet") into every maze as free space, replay the
    trajectory to collect each environment's observation sequence, and save
    the results to an .npz log.

    Reads  ./logs/tracelet_slices_and_chosen_er9_tracelet.npz
    Writes ./logs/tracelet_slices_and_chosen_er9_tracelet_and_obs_seq_low_ambiguity.npz
    """

    # NOTE(review): seq_len is never used anywhere below — dead local?
    seq_len = 16

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    # Every CLI flag defaults to the corresponding ReplayConfig field; the
    # parsed values are written back into the config object below.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    obs_seq_low_ambiguity_final = []

    # NOTE(review): this loop runs exactly once; tracelet_slices and
    # chosen_er9_tracelet are defined inside it but reused after the loop,
    # which only works because range(1) guarantees one iteration.
    for l in range(1):

        """ create grid env
        """
        start_time = time.time()

        landscape = generate_maze_pool(num_mazes = 5000, width = 10, height = 10, weight=0.4)
        # Pad each 10x10 maze out to 12x12 (presumably adds a border wall —
        # defined in maze_factory; verify).
        landscape = padding_landscapes0(landscape, width=10+2, height=10+2)

        GE = GridEnv(landscapes = landscape, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
        GE.reset()
        print("time taken to create envs: ", time.time() - start_time)

        print("shape of GE.batched_envs = ", GE.batched_envs.shape)

        """ load redundant trajectories
        """
        trajectory_data = np.load("./logs/tracelet_slices_and_chosen_er9_tracelet.npz")
        tracelet_slices = trajectory_data["tracelet_slices"]
        chosen_er9_tracelet = trajectory_data["chosen_er9_tracelet"]

        print("tracelet_slices.shape: ", tracelet_slices.shape)
        print("chosen_er9_tracelet.shape: ", chosen_er9_tracelet.shape)

        @jax.jit
        def set_pixel(env_map, state, value):
            """ Functionally set env_map[state[0], state[1]] = value and
            return the updated map (jnp arrays are immutable).
            """
            env_map = env_map.at[state[0], state[1]].set(value)
            return env_map
        # Vectorized over the batch of env maps; the same (state, value)
        # pair is applied to every map.
        set_pixel_vmap = jax.vmap(set_pixel, in_axes=(0, None, None))

        # For every position along tracelet_slices, turn the corresponding
        # cell of each environment into free space (value 1) so that the
        # recorded trajectory is walkable in every maze.
        for i in range(tracelet_slices.shape[0]):
            position = tracelet_slices[i]
            GE.batched_envs = set_pixel_vmap(GE.batched_envs, position, 1)

        # Collect the observation sequence along the full trajectory for
        # every environment.
        obs_seq = []
        for i in range(tracelet_slices.shape[0]):
            # Build a zero jnp array shaped like GE.batched_states.
            states = jnp.zeros_like(GE.batched_states)
            # Set every row of states to the current trajectory position.
            states = states.at[:,0].set(tracelet_slices[i][0])
            states = states.at[:,1].set(tracelet_slices[i][1])
            # Teleport all environments to that position.
            GE.batched_states = jnp.copy(states)
            # Fetch the per-env observation at this position (helper from
            # grid_env_ideal_obs_repeat_task — behavior assumed; verify).
            obs_state = get_ideal_obs_vmap_rf(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
            obs_seq.append(np.array(obs_state))

        obs_seq = np.array(obs_seq)
        # Swap axes: (path_len, num_envs, ...) -> (num_envs, path_len, ...).
        obs_seq = np.swapaxes(obs_seq, 0, 1)
        print("obs_seq.shape: ", obs_seq.shape)

        # Filter for low-ambiguity obs sequences. NOTE(review): the actual
        # ambiguity check is commented out, so every sequence currently
        # passes the filter unchanged.
        obs_seq_low_ambiguity = []
        for i in range(obs_seq.shape[0]):
            low_ambiguity = True
            # for j in range(obs_seq.shape[1]-1):
            #     if np.sum(np.abs(obs_seq[i,j] - obs_seq[i,j+1])) == 0:
            #         low_ambiguity = False
            #         break
            if low_ambiguity:
                obs_seq_low_ambiguity.append(obs_seq[i])

        obs_seq_low_ambiguity_final = obs_seq_low_ambiguity_final + obs_seq_low_ambiguity

        obs_seq_low_ambiguity = np.array(obs_seq_low_ambiguity)
        print("obs_seq_low_ambiguity.shape: ", obs_seq_low_ambiguity.shape)

    obs_seq_low_ambiguity_final = np.array(obs_seq_low_ambiguity_final)
    print("obs_seq_low_ambiguity_final.shape: ", obs_seq_low_ambiguity_final.shape)

    # Save obs_seq_low_ambiguity_final together with the arrays loaded from
    # trajectory_data into a single npz file.
    np.savez("./logs/tracelet_slices_and_chosen_er9_tracelet_and_obs_seq_low_ambiguity.npz", 
             tracelet_slices=tracelet_slices, 
             chosen_er9_tracelet=chosen_er9_tracelet, 
             obs_seq_low_ambiguity_final=obs_seq_low_ambiguity_final)

    # def make_obs_img(obs_int):
    #     # Drop the last element of the (10,)-shaped obs, leaving shape (9,).
    #     obs = obs_int[:-1]
    #     # Reshape obs into a (3, 3) numpy array.
    #     obs = obs.reshape((3, 3))
        
    #     # Invert and scale to 8-bit grayscale (1 -> 0, 0 -> 255).
    #     obs = (1-obs) * 255
    #     # Reshape obs to (3, 3, 1).
    #     obs = obs.reshape((3, 3, 1))
    #     # Stack the channel into a (3, 3, 3) image.
    #     obs = np.concatenate((obs, obs, obs), axis=2)
    #     # Convert obs to OpenCV's 8-bit image format.
    #     obs = obs.astype(np.uint8)
    #     # Resize obs up to (60, 60, 3).
    #     obs = cv2.resize(obs, (60, 60), interpolation=cv2.INTER_NEAREST)
    #     # Draw a 3x3 gray grid over obs.
    #     for i in range(1, 3):
    #         cv2.line(obs, (0, i*20), (60, i*20), (100, 100, 100), 1)
    #         cv2.line(obs, (i*20, 0), (i*20, 60), (100, 100, 100), 1)
    #     # Draw a gray border around the edges of obs.
    #     cv2.rectangle(obs, (0, 0), (59, 59), (100, 100, 100), 1)
    #     return obs

    # for k in range(20):
    #     # Pick a random obs sequence from obs_seq.
    #     rnd_idx = np.random.randint(0, obs_seq_low_ambiguity.shape[0])
    #     obs_seq_chosen = obs_seq_low_ambiguity[rnd_idx]

    #     obs_slices_img = []
    #     for i in range(obs_seq_chosen.shape[0]):
    #         obs_slices_img.append(make_obs_img(obs_seq_chosen[i]))
    #     # Tile all images in obs_slices_img into one wide strip.
    #     obs_slices_img = np.concatenate(obs_slices_img, axis=1)
    #     cv2.imshow("obs_slices_img", obs_slices_img)
    #     cv2.waitKey(0)

    # fig, ax1 = plt.subplots(figsize=(12, 12))
    # ax1.plot(tracelet_slices[:,0], tracelet_slices[:,1], '-', color='red')
    # ax1.set_xlim(0, 11)
    # ax1.set_ylim(0, 11)
    # plt.show()

    # # render GE env[0]
    # for i in range(20):
    #     map_ = GE.render2(env_id=i)
    #     cv2.imshow("map_", map_)
    #     cv2.waitKey(0)

# Script entry point.
if __name__ == "__main__":
    main()