from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Print an in-place textual progress bar (carriage-return overwrite).

    Args:
        current: number of completed steps.
        total: total number of steps; guarded so a zero total no longer
            raises ZeroDivisionError.
        barLength: width of the bar in characters.
    """
    percent = float(current) * 100 / max(total, 1)
    # Clamp the shaft length at 0 so the multiplier is never negative.
    arrow = '-' * max(int(percent / 100 * barLength - 1), 0) + '>'
    spaces = ' ' * (barLength - len(arrow))

    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Run one JIT-compiled forward pass of `model`.

    `model` is declared static, so each distinct (hashable) model object
    gets its own compiled version.

    Args:
        variables: parameter pytree handed to `model.apply`.
        state: recurrent network state.
        x: input observation batch.
        model: the network object (static argument).

    Returns:
        Whatever `model.apply(variables, state, x)` returns
        (new state and output).
    """
    result = model.apply(variables, state, x)
    return result

@jit
def get_action(y):
    """Greedy action selection: index of the maximum-valued entry of `y`."""
    best_index = jnp.argmax(y)
    return best_index

# Batched variant: one greedy action per row of the input.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
# load landscape and states from file
def load_task(pth = "./logs/task.json"):
    """Load a saved task from a JSON file.

    The file is expected to hold keys "data" (flat landscape grid),
    "state" (start cell) and "goal" (goal cell).

    Args:
        pth: path to the JSON task file.

    Returns:
        Tuple (landscape, state, goal) exactly as stored in the file.
    """
    with open(pth, "r") as f:
        data = json.load(f)
    landscape = data["data"]
    state = data["state"]
    goal = data["goal"]
    print("state: ", state)
    print("goal: ", goal)
    print("landscape: ", landscape)
    return landscape, state, goal

# save current landscape as json file
# save current landscape as json file
def save_current_task(landscape, start_x, start_y, goal_x, goal_y, pth = "./logs/landscape.json"):
    """Write the current task to a JSON file.

    Args:
        landscape: sequence whose first element is a 1-D array-like of cell
            values; only landscape[0] is serialized.
        start_x, start_y: start cell coordinates.
        goal_x, goal_y: goal cell coordinates.
        pth: output file path.
    """
    flat_cells = [int(landscape[0][j]) for j in range(landscape[0].shape[0])]
    payload = {"data": flat_cells,
               "state": [start_x, start_y],
               "goal": [goal_x, goal_y]}
    with open(pth, "w") as f:
        json.dump(payload, f)

def render(grid, state, goal, valid = True):
    """Render the grid world as a BGR image.

    Cells with value 1 are drawn white, others black, each with a grey
    border.  The agent position (`state`) is a red dot, the goal a dark
    green dot.  When `valid` is False a red cross and an "invalid map"
    label are drawn over the image.

    Args:
        grid: 2-D array indexed as grid[row, col].
        state: (row, col) of the agent.
        goal: (row, col) of the goal.
        valid: whether the map currently passes validation.

    Returns:
        A (width*20, height*20, 3) uint8 image.
    """
    state_x = int(state[0])
    state_y = int(state[1])

    food_x = int(goal[0])
    food_y = int(goal[1])

    grid_size_display = 20
    width, height = grid.shape[0], grid.shape[1]
    img = np.zeros((width * grid_size_display, height * grid_size_display, 3), np.uint8)

    for j in range(width):
        for i in range(height):
            # White fill for free cells, black for walls; then a grey border.
            fill = (255, 255, 255) if grid[j, i] == 1 else (0, 0, 0)
            top_left = (i * grid_size_display, j * grid_size_display)
            bottom_right = (i * grid_size_display + grid_size_display, j * grid_size_display + grid_size_display)
            cv2.rectangle(img, top_left, bottom_right, fill, -1)
            cv2.rectangle(img, top_left, bottom_right, (100, 100, 100), 1)
            if j == state_x and i == state_y:
                # Agent position: red dot at the cell centre.
                cv2.circle(img, (i * grid_size_display + int(grid_size_display/2), j * grid_size_display + int(grid_size_display/2)), 7, (0, 0, 255), -1, cv2.LINE_AA)

    # Goal position: green dot (BUGFIX: the original drew this circle twice).
    cv2.circle(img, (food_y * grid_size_display + grid_size_display//2, food_x * grid_size_display + grid_size_display//2), 7, (0,100,0), -1, cv2.LINE_AA)

    if not valid:
        # Draw a big red cross plus an outlined "invalid map" label.
        cv2.line(img, (0, 0), (img.shape[1], img.shape[0]), (0, 0, 255), 5, cv2.LINE_AA)
        cv2.line(img, (0, img.shape[0]), (img.shape[1], 0), (0, 0, 255), 5, cv2.LINE_AA)
        cv2.putText(img, "invalid map", (int(img.shape[1]/2) - 100, int(img.shape[0]/2)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (200, 200, 200), 3, cv2.LINE_AA)
        cv2.putText(img, "invalid map", (int(img.shape[1]/2) - 100, int(img.shape[0]/2)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (200, 0, 0), 2, cv2.LINE_AA)
    return img

# Shared mouse-event state: written by the OpenCV mouse callback inside
# run_editor and consumed/acknowledged by its main loop.
event_type = ""
event_x = 0
event_y = 0

def run_editor(landscape, state, goal, map_size=12):
    """Interactive OpenCV map editor.

    Mouse: wheel event toggles a cell between wall and free space,
    left-button-up sets the start cell, right-button-up sets the goal cell.
    Keyboard: 's' saves the current task to ./logs/test.json, 'r' restores
    the original landscape, 'q' quits.

    Args:
        landscape: flat (row-major) list describing the initial map.
        state: [row, col] start cell.
        goal: [row, col] goal cell.
        map_size: side length of the square map.

    Returns:
        The edited landscape as a flat list of ints.
    """

    global event_type, event_x, event_y

    # Mouse callback only records the latest event; the loop below applies it.
    def input_cb(event,x,y,flags,param):
        global event_type, event_x, event_y
        if event == cv2.EVENT_MOUSEWHEEL:
            event_x = x
            event_y = y
            event_type = "flip_space"
        elif event == cv2.EVENT_LBUTTONUP:
            event_x = x
            event_y = y
            event_type = "set_start"
        elif event == cv2.EVENT_RBUTTONUP:
            event_x = x
            event_y = y
            event_type = "set_goal"

    cv2.namedWindow("img", cv2.WINDOW_GUI_NORMAL)
    cv2.setMouseCallback("img", input_cb)

    grid = np.array(landscape).reshape(map_size, map_size).transpose()
    valid = True
    grid_size_display = 20  # pixels per cell; must match render()

    while True:

        # Apply the most recent mouse event exactly once.
        if event_type == "flip_space":
            grid[event_y//grid_size_display, event_x//grid_size_display] = 1 - grid[event_y//grid_size_display, event_x//grid_size_display]
            event_type = "flip_space_done"
        elif event_type == "set_start":
            state = [event_y//grid_size_display, event_x//grid_size_display]
            event_type = "set_start_done"
        elif event_type == "set_goal":
            goal = [event_y//grid_size_display, event_x//grid_size_display]
            event_type = "set_goal_done"

        num_labels, labels, stats, centroids, num_freespace, landscape_img = check_num_labels(grid, map_size, map_size)
        # BUGFIX: count free cells of the *edited* grid.  The original used
        # the untouched input `landscape`, so edits never affected this check.
        non_zeros = np.count_nonzero(grid)
        # Valid when exactly 2 connected-component labels are found and the
        # map has at least 5 non-zero (free) cells.
        valid = num_labels == 2 and non_zeros >= 5

        img = render(grid, state, goal, valid)
        cv2.imshow("img", img)
        k = cv2.waitKey(1)
        if k == ord('q'):
            break
        elif k == ord('s'):
            # Save in the same flat row-major layout load_task expects.
            pth = "./logs/test.json"
            grid0 = grid.transpose()
            grid1 = grid0.reshape(map_size*map_size)
            save_current_task([grid1], state[0], state[1], goal[0], goal[1], pth)
            print("task saved to {}".format(pth))
        elif k == ord('r'):
            # Restore the original landscape.
            grid = np.array(landscape).reshape(map_size, map_size).transpose()

    grid_ = grid.transpose()
    grid_ = grid_.reshape(map_size*map_size).tolist()
    return grid_


def main():

    """Replay a trained agent on a grid task and compare action statistics.

    Loads model weights and a maze task (random, or from task_pth when that
    file exists), then steps the agent for `life_duration` steps.  Two
    low-pass-filtered mean action directions are accumulated each step:
    A over a fixed table of synthetic observations (binary_list), B over the
    observations actually collectable in this maze.  Both are finally
    compared against the start-to-goal direction via a dot product.
    """
    rpl_config = ReplayConfig()

    # CLI flags default to the values held by ReplayConfig.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Copy the parsed values back into the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # NOTE(review): k1 appears unused below — verify before removing.
    k1 = jax.random.PRNGKey(npr.randint(0, 1000000))

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # Print the shape of every parameter leaf as a sanity check.
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    # Per-action displacement; the last entry is "stay".
    action_list = np.array([[0, 1], [0, -1], [1, 0], [-1, 0], [0, 0]])

    # Unit vectors used to draw each action as an arrow.
    arrow_length = 1
    arrow_list = np.array([[arrow_length, 0], [-arrow_length, 0], [0, arrow_length], [0, -arrow_length], [0, 0]])

    # Build the set of all 8-bit binary strings "00000000" .. "11111111".
    binary_set = set()
    for i in range(256):
        binary_string = format(i, '08b')
        binary_set.add(binary_string)
    # Remove the all-ones pattern "11111111".
    binary_set.remove("11111111")
    # Remove every string whose bits 1, 3, 4 and 6 are all '1'
    # (presumably observations with all four side cells blocked — TODO confirm).
    binary_set_bk = binary_set.copy()
    for binary in binary_set_bk:
        if binary[1] == '1' and binary[3] == '1' and binary[4] == '1' and binary[6] == '1':
            binary_set.remove(binary)
    print("binary_set: ", binary_set)

    # Sort ascending: "00000000" .. "11111110".
    binary_list = sorted(binary_set)
    # Insert a '0' after the 4th character and append a trailing '0',
    # e.g. "00000000" -> "000000000" -> "0000000000"
    # (presumably the agent's own centre cell plus a flag bit — TODO confirm).
    binary_list = [i[:4] + "0" + i[4:] + "0" for i in binary_list]
    # Convert each binary string into a list of ints,
    # e.g. "0000000000" -> [0, 0, 0, 0, 0, 0, 0, 0, 0, 0].
    binary_list = [list(map(int, list(i))) for i in binary_list]
    binary_list = np.array(binary_list)
    # print("binary_list: ", binary_list)
    print("shape of binary_list: ", binary_list.shape)

    binary_list, arrow_list = jnp.array(binary_list), jnp.array(arrow_list)


    """ create landscape
    """
    # Use a random maze unless a task file already exists on disk.
    random_task = True
    # check if file on rpl_config.task_pth exists
    if os.path.isfile(rpl_config.task_pth):
        random_task = False

    if random_task:
        landscape = generate_maze_pool(num_mazes=1, width=10, height=10)
        landscape = padding_landscapes(landscape, width=12, height=12)
    else:
        landscape, state, goal = load_task(pth = rpl_config.task_pth)
        landscape = [landscape]

    print("landscape :")
    print(landscape)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscape, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # Collect the observation at every interior cell of the map;
    # general_obs is a (12, 12, 10) numpy array.
    general_obs = np.zeros((12, 12, 10))
    for ii in range(1,11):      # width
        for jj in range(1,11):  # height
            batched_states = jnp.array([[jj,ii] for _ in range(1)])
            concat_obs = get_ideal_obs_vmap_rf(GE.batched_envs, batched_states, GE.batched_goals, GE.last_batched_goal_reached)
            general_obs[jj, ii] = concat_obs[0]
    # Flatten general_obs into a linear (144, 10) array of observations.
    general_obs_linear = general_obs.reshape((12*12, 10))
    general_obs_linear = jnp.array(general_obs_linear)

    if not random_task:
        # set states of GE
        GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
        GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
        # set goals of GE
        GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
        GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
        GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)

    concat_obs = GE.concat_obs

    rnn_state = model.initial_state(GE.num_envs)

    step_count = 0
    render_id = 0    # index of the environment that gets rendered

    trajectory = []  # rendered agent path, in pixel coordinates

    reset_ = True

    step_by_step = False  # when True, actions come from `manual_action`
    manual_action = 0

    first_arrival = 0

    @jit
    def action_distribution(rnn_state, binary_list):
        # compute actions
        def compute_action(binary, rnn_state):
            _, action_output = model_forward(params, rnn_state, binary, model)
            action = jnp.argmax(action_output)

            # Check whether the chosen action direction is blocked by a wall;
            # `blocked` zeroes the corresponding movement component below.
            blocked = jnp.array([0, 0])
            blocked = jnp.where((action == 0) & (binary[5] == 1), jnp.array([1, 0]), blocked)
            blocked = jnp.where((action == 1) & (binary[3] == 1), jnp.array([0, 1]), blocked)
            blocked = jnp.where((action == 2) & (binary[7] == 1), jnp.array([-1, 0]), blocked)
            blocked = jnp.where((action == 3) & (binary[1] == 1), jnp.array([0, -1]), blocked)

            direction = arrow_list[action]
            direction = jnp.where(blocked[0] == 1, jnp.array([0, direction[1]]), direction)
            direction = jnp.where(blocked[1] == 1, jnp.array([direction[0], 0]), direction)

            return direction

        directions = vmap(compute_action, in_axes=(0, None))(binary_list, rnn_state[0])
        # Mean direction over all observation rows.
        mean_direction = jnp.mean(directions, axis=0)
        return mean_direction, directions

    def make_obs_img(obs_int):
        # Drop the last element: (10,) -> (9,).
        obs = obs_int[:-1]
        # Reshape to a (3, 3) patch.
        obs = obs.reshape((3, 3))

        # Invert and scale: 1 -> 0, 0 -> 255.
        obs = (1-obs) * 255
        # Reshape to (3, 3, 1).
        obs = obs.reshape((3, 3, 1))
        # Replicate to 3 channels: (3, 3, 3).
        obs = np.concatenate((obs, obs, obs), axis=2)
        # Convert to OpenCV's 8-bit image format.
        obs = obs.astype(np.uint8)
        # Upscale to (60, 60, 3) with nearest-neighbour so cells stay crisp.
        obs = cv2.resize(obs, (60, 60), interpolation=cv2.INTER_NEAREST)
        # Draw a grey 3x3 grid over the patch.
        for i in range(1, 3):
            cv2.line(obs, (0, i*20), (60, i*20), (100, 100, 100), 1)
            cv2.line(obs, (i*20, 0), (i*20, 60), (100, 100, 100), 1)
        # Grey border around the edge.
        cv2.rectangle(obs, (0, 0), (59, 59), (100, 100, 100), 1)
        return obs

    def make_obs_table_img():
        # Render every observation in binary_list as a small image tile.
        obs_imgs = []
        for i in range(binary_list.shape[0]):
            obs_img = make_obs_img(binary_list[i])
            obs_imgs.append(obs_img.copy())
        obs_imgs = np.array(obs_imgs)

        # Tile into a 16x16 table of 60x60 tiles with 3-pixel gaps.
        obs_table_img = np.zeros((16*60+15*3, 16*60+15*3, 3), dtype=np.uint8)
        obs_table_img[:, :] = (255, 255, 255)
        for i in range(16):
            for j in range(16):
                if i*16+j < obs_imgs.shape[0]:
                    obs_table_img[i*63:i*63+60, j*63:j*63+60] = obs_imgs[i*16+j]

        return obs_table_img.copy()

    obs_table_img = make_obs_table_img()

    def draw_directions_on_obs_table_img(obs_table_img, directions):
        # Draw one red arrow per tile showing that observation's direction.
        obs_table_img1 = obs_table_img.copy()
        for i in range(16):
            for j in range(16):
                if i*16+j < directions.shape[0]:
                    cv2.arrowedLine(obs_table_img1, (int(j*63+30), int(i*63+30)), 
                                    (int(j*63+30 + directions[i*16+j][0]*20), int(i*63+30 + directions[i*16+j][1]*20)),
                                    (0, 0, 255), 2, tipLength=0.7)
        
        return obs_table_img1

    # Low-pass-filter accumulators for the two mean directions.
    direction_mean_lpf = np.zeros((2,))
    direction_mean_lpf_B = np.zeros((2,))

    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        step_count += 1

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)

        # Timing instrumentation for distribution A (synthetic table).
        start_time = time.time()
        j_rnn_state = jnp.array(rnn_state)
        direction_mean, directions = action_distribution(j_rnn_state, binary_list)
        direction_mean_lpf = 0.95 * direction_mean_lpf + 0.05 * direction_mean
        # print("direction_mean_lpf: ", direction_mean_lpf)
        # print("time taken to compute action_distribution_A: ", time.time() - start_time)

        # Timing instrumentation for distribution B (this maze's observations).
        start_time = time.time()
        direction_mean_B, _ = action_distribution(j_rnn_state, general_obs_linear)
        direction_mean_lpf_B = 0.95 * direction_mean_lpf_B + 0.05 * direction_mean_B
        # print("direction_mean_lpf_B: ", direction_mean_lpf_B)
        # print("time taken to compute action_distribution_B: ", time.time() - start_time)

        if not step_by_step:
            batched_actions = get_action_vmap(y1)
        else:
            batched_actions = jnp.array([manual_action])

        batched_goal_reached, concat_obs = GE.step(batched_actions, reset = reset_)

        """ render the env
        """
        if rpl_config.visualization == "True" or rpl_config.video_output == "True":
            img = GE.render(env_id = render_id)
            if len(trajectory) > 1:
                # Overlay the path travelled so far.
                for i in range(len(trajectory)-1):
                    cv2.line(img, (int(trajectory[i][1]), int(trajectory[i][0])), (int(trajectory[i+1][1]), int(trajectory[i+1][0])), (0,130,0), 2)
            
        trajectory.append([20 * GE.batched_states[render_id][0]+10, 20 * GE.batched_states[render_id][1]+10])

        if batched_goal_reached[render_id]:
            print(len(trajectory))
            trajectory.clear()
        
        if batched_goal_reached[render_id] and first_arrival == 0:
            first_arrival = 1

        """ scene display
        """
        if rpl_config.visualization == "True":

            # White canvas the same size as `img` for the direction arrows.
            action_distribution_img = np.zeros_like(img)
            action_distribution_img[:, :] = (255, 255, 255)
            # Red arrow from the centre: low-pass-filtered mean direction (A).
            cv2.arrowedLine(action_distribution_img, (int(img.shape[0]/2), int(img.shape[1]/2)), 
                            (int(img.shape[0]/2 + direction_mean_lpf[0]*100), int(img.shape[1]/2 + direction_mean_lpf[1]*100)),
                            (0, 0, 255), 2)
            # Green arrow from the centre: instantaneous mean direction.
            cv2.arrowedLine(action_distribution_img, (int(img.shape[0]/2), int(img.shape[1]/2)),
                            (int(img.shape[0]/2 + direction_mean[0]*100), int(img.shape[1]/2 + direction_mean[1]*100)),
                            (0, 255, 0), 2)
            # # (disabled) blue arrow: low-pass-filtered mean direction B.
            # cv2.arrowedLine(action_distribution_img, (int(img.shape[0]/2), int(img.shape[1]/2)),
            #                 (int(img.shape[0]/2 + direction_mean_lpf_B[0]*100), int(img.shape[1]/2 + direction_mean_lpf_B[1]*100)),
            #                 (255, 0, 0), 2)
            # Green arrow: the actual start -> goal line of the task.
            initial_state = GE.init_batched_states[render_id] * 20 + 10
            goal_state = GE.batched_goals[render_id] * 20 + 10
            cv2.arrowedLine(action_distribution_img, (int(initial_state[1]), int(initial_state[0])),
                            (int(goal_state[1]), int(goal_state[0])),
                            (0, 255, 0), 2)

            # Show env render and the arrow canvas side by side in one window.
            img = np.concatenate((img, action_distribution_img), axis=1)
            cv2.imshow("img", img)

            # # obs_img = make_obs_img(concat_obs[0])
            # # cv2.imshow("obs", obs_img)
            # obs_table_img1 = draw_directions_on_obs_table_img(obs_table_img, directions)
            # cv2.imshow("obs_table", obs_table_img1)

            if step_by_step:
                k = cv2.waitKey(0)
            else:
                k = cv2.waitKey(1)
            if k == ord('r'): 
                # 'r': re-initialise the RNN state and reset the task.
                rnn_state = model.initial_state(GE.num_envs)
                GE.rnd_goal_collection = get_rnd_goal_collection_vmap(GE.env_keys, GE.batched_envs, GE.width, GE.height, GE.num_free_spaces)
                GE.reset()
                if not random_task:
                    # set states of GE
                    GE.batched_states = GE.batched_states.at[0, 0].set(state[0])
                    GE.batched_states = GE.batched_states.at[0, 1].set(state[1])
                    # set goals of GE
                    GE.batched_goals = GE.batched_goals.at[0, 0].set(goal[0])
                    GE.batched_goals = GE.batched_goals.at[0, 1].set(goal[1])
                    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
                    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
                    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
                    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
                    concat_obs = GE.concat_obs
                    
                trajectory.clear()
            if k == ord('q'):
                exit()
        

    print("task : ",GE.init_batched_states[render_id], GE.batched_goals[render_id])

    # Build unit vectors: swap components ([1], [0]) before computing the
    # task vector, then normalise it and both accumulated directions.
    initial_state = GE.init_batched_states[render_id] * 20 + 10
    initial_state = np.array([initial_state[1], initial_state[0]])
    goal_state = GE.batched_goals[render_id] * 20 + 10
    goal_state = np.array([goal_state[1], goal_state[0]])
    task_vector = goal_state - initial_state
    task_vector = task_vector / np.linalg.norm(task_vector)
    action_distribution_A_vector = direction_mean_lpf
    action_distribution_B_vector = direction_mean_lpf_B
    action_distribution_A_vector = action_distribution_A_vector / np.linalg.norm(action_distribution_A_vector)
    action_distribution_B_vector = action_distribution_B_vector / np.linalg.norm(action_distribution_B_vector)

    # Dot product of distribution A's unit direction with the task direction.
    similarity_A = np.dot(action_distribution_A_vector, task_vector)
    # similarity_A = np.abs(similarity_A)
    # Dot product of distribution B's unit direction with the task direction.
    similarity_B = np.dot(action_distribution_B_vector, task_vector)
    # similarity_B = np.abs(similarity_B)

    print("similarity_A: ", similarity_A)
    print("similarity_B: ", similarity_B)

    if similarity_A > similarity_B:
        print("action_distribution_A is better")
    else:
        print("action_distribution_B is better")

            

# Script entry point: run the replay/analysis loop.
if __name__ == "__main__":
    main()