from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
from scipy.spatial import distance
import threading

# Phase-space analysis: roll out a trained agent on a saved task and replay its trajectory.

def progress_bar(current, total, barLength = 100):
    """Render an in-place textual progress bar on stdout.

    Uses a carriage return (no newline) so successive calls overwrite
    the same terminal line.
    """
    fraction = float(current) * 100 / total
    filled = int(fraction / 100 * barLength - 1)
    arrow = ('-' * filled) + '>'
    padding = ' ' * (barLength - len(arrow))
    print('Progress: [%s%s] %d %%' % (arrow, padding, fraction), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """ forward pass of the model

    Args:
        variables: parameter pytree passed through to ``model.apply``.
        state: recurrent (hidden) state of the network.
        x: batched observation input.
        model: the network object; marked static (``static_argnums=(3,)``)
            so jax.jit compiles a separate trace per model instance instead
            of trying to trace the model object itself.

    Returns:
        Whatever ``model.apply(variables, state, x)`` returns — the caller
        in main() unpacks it as a (new_state, output) pair.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: the index of the largest entry of `y`."""
    return y.argmax()
# Batched variant: maps get_action over the leading (environment) axis.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Load a saved task from a JSON file.

    Args:
        pth: path to the JSON file; expected to hold the keys
            "data" (landscape), "state" (start state) and "goal".
        display: when True, echo the loaded fields to stdout.

    Returns:
        (landscape, state, goal) as parsed from the file.
    """
    with open(pth, "r") as f:
        data = json.load(f)
    landscape = data["data"]
    state = data["state"]
    goal = data["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal

# Global state shared between the main thread and the image-display thread.
class imgview:
    """Mutable state shared with the show_image() display thread.

    Attributes are class-level so every reference sees the same values;
    the module exposes a single instance, ``imgview_data``.
    """
    global_image = None   # latest frame to display; None until a producer publishes one
    imgview_exit = False  # set True to make the show_image() loop terminate
    trajectory = []       # list of grid positions; show_image() indexes [0]=row, [1]=col
    focus_i = 0           # index adjusted by the 'a'/'d' keys inside show_image()
    traj_i = 0            # current trajectory index (not read by show_image itself)
    start_i = 0           # trajectory index drawn as the start marker
    goal_i = 0            # trajectory index drawn as the goal marker

# Singleton instance shared with the display thread.
imgview_data = imgview()

# Function meant to run on a separate thread to display images.
def show_image():
    """Display loop intended to run on its own thread.

    Polls ``imgview_data.global_image``; when a frame is available, draws
    start and goal markers taken from ``imgview_data.trajectory`` and shows
    the result in an OpenCV window.  The 'a'/'d' keys decrement/increment
    ``imgview_data.focus_i``.  The loop exits when
    ``imgview_data.imgview_exit`` becomes True.

    NOTE(review): assumes ``imgview_data.trajectory`` is populated before
    ``global_image`` is set — otherwise the indexing below raises
    IndexError.  Confirm the producer's ordering.
    """
    grid_size_display = 20
    while not imgview_data.imgview_exit:
        # Check whether a frame has been published yet.
        if imgview_data.global_image is not None:
            img = np.copy(imgview_data.global_image)
            start_x = imgview_data.trajectory[imgview_data.start_i][0]
            start_y = imgview_data.trajectory[imgview_data.start_i][1]
            # Grid coords are (row, col); cv2 wants (x, y) = (col, row) pixel centers.
            cv2.circle(img, (start_y * grid_size_display + int(grid_size_display/2), start_x * grid_size_display + int(grid_size_display/2)), 7, (0, 0, 150), -1, cv2.LINE_AA)
            end_x = imgview_data.trajectory[imgview_data.goal_i][0]
            end_y = imgview_data.trajectory[imgview_data.goal_i][1]
            cv2.circle(img, (end_y * grid_size_display + int(grid_size_display/2), end_x * grid_size_display + int(grid_size_display/2)), 7, (0, 150, 0), -1, cv2.LINE_AA)

            # Show the annotated frame.
            cv2.imshow("Image", img)
            key = cv2.waitKey(1)
            if key == ord('a'):
                imgview_data.focus_i -= 1
                print("imgview_data.focus_i: ", imgview_data.focus_i)
            elif key == ord('d'):
                imgview_data.focus_i += 1
                print("imgview_data.focus_i: ", imgview_data.focus_i)
        else:
            # No frame yet; wait 100 ms before polling again.
            time.sleep(0.1)

def main():
    """Replay a trained recurrent agent on a saved task and visualize it.

    Loads the task and model weights named by ReplayConfig (every field
    overridable on the command line), rolls the policy out for
    ``life_duration`` steps in a GridEnv while recording states, actions
    and goal-reached flags, then opens an interactive OpenCV window that
    replays the recorded trajectory.

    Replay controls:
        space - toggle pause / play
        a / d - step backward / forward while paused
        q     - quit
    """

    # ---- parse arguments (defaults come from ReplayConfig) ----
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--start_i", type=int, default=rpl_config.start_i)
    parser.add_argument("--end_i", type=int, default=rpl_config.end_i)

    args = parser.parse_args()

    # Write the (possibly overridden) values back into the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    rpl_config.start_i = args.start_i
    rpl_config.end_i = args.end_i

    # ---- load the single task (landscape, start state, goal) ----
    landscapes, states, goals = [], [], []

    landscape_, state_, goal_ = load_task(rpl_config.task_pth, display=False)

    landscapes.append(landscape_)
    states.append(state_)
    goals.append(goal_)

    states = jnp.array(states)
    goals = jnp.array(goals)

    # ---- load model weights ----
    params = load_weights(rpl_config.model_pth)

    # Print every parameter leaf's shape as a quick sanity check.
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    mat1 = np.array(tree_leaves[1])
    print("mat1.shape: ", mat1.shape)

    # Observation rows of the input weight matrix.
    # NOTE(review): the 128:137 slice assumes nn_size == 128 and a 9-dim
    # observation — confirm against the training configuration.
    mat_obs = np.array(tree_leaves[1])[128:137,:]
    print("mat_obs.shape: ", mat_obs.shape)

    # ---- create agent ----
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)
    else:
        # Previously this fell through silently and crashed later with a
        # NameError on `model`; fail fast with a clear message instead.
        raise ValueError("unknown nn_type: %s" % rpl_config.nn_type)

    # Check that the loaded weights fit the agent.
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    # ---- create grid env ----
    start_time = time.time()

    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # Overwrite the freshly-reset env with the loaded task's state and goal,
    # then recompute the derived fields that depend on them.
    GE.batched_states = states.copy()
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs

    print("shape of concat_obs: ", concat_obs[0].shape)

    rnn_state = model.initial_state(GE.num_envs)

    # ---- roll the policy out and record the trajectory ----
    trajectories = []
    goal_record = []
    action_record = []

    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        # Model forward pass, then greedy action selection.
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)

        # Record state / goal-reached flag / chosen action for env 0.
        trajectories.append(np.array(GE.batched_states[0]))
        goal_record.append(GE.batched_goal_reached[0])
        action_record.append(batched_actions[0])

        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

    print("shape of trajectories: ", np.array(trajectories).shape)
    print("shape of goal_record: ", np.array(goal_record).shape)
    print("shape of action_record: ", np.array(action_record).shape)

    bg_img = GE.render2()
    grid_size_display = 20  # pixels per grid cell in the rendered image

    # ---- trajectory replay ----
    play_mode = 1  # 1 = auto-play, 0 = paused (manual stepping)
    i = 0
    while True:

        progress_bar(i, rpl_config.life_duration)

        cp_bg_img = np.copy(bg_img)
        start_x = trajectories[i][0]
        start_y = trajectories[i][1]
        # Action encoding: 0 = right, 1 = left, 2 = down, 3 = up.
        action = action_record[i]
        cv2.circle(cp_bg_img, (start_y * grid_size_display + int(grid_size_display/2), start_x * grid_size_display + int(grid_size_display/2)), 7, (0, 0, 150), -1, cv2.LINE_AA)
        # Derive the arrow direction from the action.
        if action == 0:
            dx, dy = 1, 0
        elif action == 1:
            dx, dy = -1, 0
        elif action == 2:
            dx, dy = 0, 1
        else:
            dx, dy = 0, -1

        # Arrow start and end points in pixel coordinates.
        start_x = start_x * grid_size_display + int(grid_size_display/2)
        start_y = start_y * grid_size_display + int(grid_size_display/2)
        end_x = start_x + dy * 20
        end_y = start_y + dx * 20

        # Draw the arrow.
        cv2.arrowedLine(cp_bg_img, (start_y, start_x), (end_y, end_x), (0, 0, 150), 2, cv2.LINE_AA, tipLength=0.3)

        cv2.imshow("Image", cp_bg_img)
        # waitKey(0) blocks (paused); waitKey(1) yields a ~1 ms frame delay.
        key = cv2.waitKey(play_mode)

        if key == ord(' '):
            play_mode = 1 - play_mode  # toggle pause / play

        if key == ord('a') and play_mode == 0:
            i -= 1
        elif key == ord('d') and play_mode == 0:
            i += 1

        if key == ord('q'):
            break

        if play_mode == 1:
            i += 1

        # Wrap around at both ends of the recorded trajectory.
        if i >= len(trajectories):
            i = 0
        elif i < 0:
            i = len(trajectories) - 1


if __name__ == "__main__":
    main()