from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength=100):
    """Print an in-place textual progress bar to stdout.

    Args:
        current: Number of completed steps.
        total: Total number of steps.
        barLength: Width of the bar in characters.
    """
    # Guard against ZeroDivisionError on an empty run; treat it as complete.
    percent = float(current) * 100 / total if total else 100.0
    arrow = '-' * int(percent / 100 * barLength - 1) + '>'
    spaces = ' ' * (barLength - len(arrow))

    # '\r' rewinds to line start so successive calls overwrite the same line.
    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')
    sys.stdout.flush()

# argument 3 (`model`) is marked static: jit retraces/recompiles for each
# distinct model object, while the pytree args stay traced.
@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Forward pass of the model: apply `model` to one batch of observations.

    Args:
        variables: Model parameter pytree (e.g. loaded from a checkpoint).
        state: Recurrent hidden state threaded through `model.apply`.
        x: Batched observation input.
        model: Module with an `apply(variables, state, x)` method; static
            under jit.

    Returns:
        The result of `model.apply(variables, state, x)` — presumably
        (new_state, outputs), judging by the unpacking in `main`; confirm
        against the model definition.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest logit in `y`."""
    return y.argmax()


# Batched variant — maps the greedy argmax over the leading (env) axis.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Load one task description from a JSON file.

    Args:
        pth: Path to the task JSON file.
        display: When True, echo the loaded fields to stdout.

    Returns:
        Tuple (landscape, state, goal) exactly as stored under the file's
        "data", "state" and "goal" keys.
    """
    with open(pth, "r") as task_file:
        payload = json.load(task_file)

    landscape = payload["data"]
    state = payload["state"]
    goal = payload["goal"]

    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)

    return landscape, state, goal


def load_policy_ring_pca():
    """Fit a PCA basis on the centers of precomputed RNN limit rings.

    Parses the replay CLI flags (same set as `main`), loads the limit-ring
    .npz archives for sequence lengths 5..14, computes each ring's center
    (mean over the first two axes), stacks them, and fits a PCA.

    Returns:
        (pca, centers_flat_pca): the fitted sklearn PCA object and the ring
        centers projected into that PCA space, shape
        (n_configs * n_rings_per_file, n_components).
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Copy every parsed flag back onto the config object (flag names match
    # the config attribute names by construction above).
    for key, value in vars(args).items():
        setattr(rpl_config, key, value)

    # Only "vanilla" and "gru" are recognized; anything else leaves ''.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    def load_data_and_compute(nn_type, seq_len, redundancy, diverse_set_capacity):
        """Load one limit-ring .npz archive and return the per-ring centers."""
        rnn_limit_rings_file_name = (
            f"./logs/rnn_limit_rings_of_best_estimation_{nn_type}_{seq_len}_"
            f"{redundancy}_{diverse_set_capacity}.npz"
        )
        rnn_limit_rings_of_best_estimation_file = np.load(rnn_limit_rings_file_name)

        # Each named array in the archive is one limit ring; its center is the
        # mean over the first two axes.
        rnn_limit_rings_of_best_estimation_center = np.array([
            np.mean(rnn_limit_rings_of_best_estimation_file[name], axis=(0, 1))
            for name in rnn_limit_rings_of_best_estimation_file.files
        ])
        print("rnn_limit_rings_of_best_estimation_center.shape: ", rnn_limit_rings_of_best_estimation_center.shape)

        return rnn_limit_rings_of_best_estimation_center

    # One config per sequence length 5..14, fixed redundancy 1, capacity 400.
    configs = [[nn_type, seq_len, 1, 400] for seq_len in range(5, 15)]

    rnn_limit_rings_of_best_estimation_centers = np.array(
        [load_data_and_compute(*cfg) for cfg in configs]
    )

    print("rnn_limit_rings_of_best_estimation_centers.shape: ", rnn_limit_rings_of_best_estimation_centers.shape)

    # Flatten (config, ring, dim) -> (config * ring, dim) for PCA fitting.
    rnn_limit_rings_of_best_estimation_centers_flat = rnn_limit_rings_of_best_estimation_centers.reshape(
        rnn_limit_rings_of_best_estimation_centers.shape[0] * rnn_limit_rings_of_best_estimation_centers.shape[1],
        rnn_limit_rings_of_best_estimation_centers.shape[2],
    )

    pca = PCA()
    pca.fit(rnn_limit_rings_of_best_estimation_centers_flat)

    rnn_limit_rings_of_best_estimation_centers_flat_pca = pca.transform(rnn_limit_rings_of_best_estimation_centers_flat)

    return pca, rnn_limit_rings_of_best_estimation_centers_flat_pca

def main():
    """Replay the trained policy on reward-free tasks and visualize the
    steady-state neural trajectories in the policy-ring PCA space.

    Pipeline: fit/load the PCA basis, parse CLI flags, load tasks and model
    weights, roll the policy out for `life_duration` steps across all envs,
    select trajectories whose trial lengths settle into a steady regime, and
    plot their neural states projected onto the PCA basis.
    """

    PR_pca, rnn_limit_rings_of_best_estimation_centers_flat_pca = load_policy_ring_pca()

    # NOTE(review): shadowed below by the per-trajectory step counter.
    seq_len = 15

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Copy the parsed flags back onto the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # Only "vanilla" and "gru" are recognized; anything else leaves ''.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    """ load task
    """
    landscapes, states, goals = [], [], []
    rf_task_file = "./data/rf_pass_task_"+nn_type+".txt"
    rf_task_list = []
    # NOTE(review): file handle from open() is never closed — relies on GC.
    for line in open(rf_task_file):
        rf_task_list.append(line.strip())
    print("len of tf_task_list: ", len(rf_task_list))
    # print("tf_task_list: ", tf_task_list)
    # dir_path = "./data/adaptive_trajectory_optimization/task_envs/"
    dir_path = "./data/adaptive_trajectory_optimization/task_envs_rnd/"
    for tt in rf_task_list:
        # get complete path
        task_pth = dir_path + str(tt)
        landscape, state, goal = load_task(task_pth, display=False)

        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    num_envs = len(landscapes)

    states = jnp.array(states)
    goals = jnp.array(goals)

    print("shape of states: ", states.shape)
    print("shape of goals: ", goals.shape)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)
    
    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # (vanilla RNN's first Dense sees hidden + 10-dim observation input —
    # presumably obs is 10-dim; confirm against the env definition)
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # Overwrite the freshly-reset env with the states/goals loaded from disk,
    # then recompute the derived observation tensors to match.
    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs
    
    # Per-step rollout records (one entry per env per time step).
    trajectories = []
    goal_record = []
    obs_record = []
    neural_states = []
    action_record = []

    rnn_state = model.initial_state(GE.num_envs)

    rkey = jax.random.PRNGKey(np.random.randint(0, 1000000))

    rnd_inx = jax.random.randint(rkey, (1,), 0, num_envs)

    # NOTE(review): hard-coded to env 583, overriding the random draw above.
    rnd_inx = rnd_inx.at[0].set(583)

    print("rnd_inx: ", rnd_inx)

    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

        trajectories.append(np.array(GE.batched_states))
        obs_record.append(np.array(concat_obs))
        action_record.append(np.array(batched_actions))
        neural_states.append(np.array(rnn_state))
        goal_record.append(batched_goal_reached)

    #     img = GE.render(rnd_inx[0])
    #     cv2.imshow("img", img)
    #     if(cv2.waitKey(1) == ord('q')):
    #         break
    # exit()

    trajectories = np.array(trajectories)
    obs_record = np.array(obs_record)
    action_record = np.array(action_record)
    neural_states = np.array(neural_states)

    # Swap from (time, env, ...) to (env, time, ...) layout.
    trajectories = np.swapaxes(trajectories, 0, 1)
    obs_record = np.swapaxes(obs_record, 0, 1)
    action_record = np.swapaxes(action_record, 0, 1)
    neural_states = np.swapaxes(neural_states, 0, 1)

    goal_record = np.array(goal_record).T

    print("shape of trajectories: ", trajectories.shape)
    print("shape of obs_record: ", obs_record.shape)
    print("shape of action_record: ", action_record.shape)
    print("shape of neural_states: ", neural_states.shape)
    print("shape of goal_record: ", goal_record.shape)

    # For every trajectory, record how the trial length (steps between
    # consecutive goal hits) evolves over the rollout.
    trial_len_record = []
    task_ids = []
    for i in range(goal_record.shape[0]):
        progress_bar(i, goal_record.shape[0])
        trial_len = []
        seq_len = 0
        for j in range(goal_record.shape[1]):
            if goal_record[i,j] == True:
                trial_len.append(seq_len)
                seq_len = 0
            else:
                seq_len += 1
        # Keep only trajectories that end in a steady regime, i.e. the std
        # of the last 10 trial lengths is below 2.
        if np.std(trial_len[-10:]) < 2:
            trial_len_record.append(trial_len)
            task_ids.append(i)
        
    # Plot the trial-length curves of the retained trajectories.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for i in range(len(trial_len_record)):
        ax.plot(trial_len_record[i])
    plt.show()

    
    # For each retained record, locate the last element before the curve
    # enters the steady regime.
    new_trial_len_record = []
    selected_task_ids = []
    for i in range(len(trial_len_record)):
        trial_len = trial_len_record[i]
        # Steady-state threshold: mean of the last 10 trial lengths plus
        # max(1 * std, 2).
        stable_len = np.mean(trial_len[-10:]) + max(1*np.std(trial_len[-10:]), 2)
        # Scan backwards for the first element exceeding stable_len.
        stable_idx = -1
        for j in range(len(trial_len)-1, -1, -1):
            if trial_len[j] > stable_len:
                stable_idx = j
                break
        # Keep curves whose transition point exists, is not too long (<= 50)
        # and whose post-transition tail is shorter than 50 trials.
        if trial_len[stable_idx] <= 50 and stable_idx!=-1 and len(trial_len[stable_idx:]) < 50:
            new_trial_len_record.append(trial_len[stable_idx:])
            selected_task_ids.append(task_ids[i])

    print("len of new_trial_len_record: ", len(new_trial_len_record))
    print("selected_task_ids : ", selected_task_ids)

    # Plot the post-transition trial-length curves.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for i in range(len(new_trial_len_record)):
        ax.plot(new_trial_len_record[i])
    plt.show()

    # Convert new_trial_len_record into start indices into the neural
    # trajectory data.
    # NOTE(review): 1000 assumes life_duration == 1000 steps — TODO confirm.
    selected_nn_trj_idx = []
    for i in range(len(new_trial_len_record)):
        trj_len = np.sum(new_trial_len_record[i])
        selected_nn_trj_idx.append(1000-trj_len)

    # # Print the selected_task_id whose new_trial_len_record has the largest
    # # first element.
    # max_len = 0
    # target_task_id = 0
    # for i in range(len(new_trial_len_record)):
    #     if max(new_trial_len_record[i]) > max_len:
    #         max_len = max(new_trial_len_record[i])
    #         target_task_id = selected_task_ids[i]
    # print("target_task_id: ", target_task_id)
    # print("max_len: ", max_len)

    # Select the corresponding neural trajectories (post-transition tails).
    selected_nn_trj_complete = neural_states[selected_task_ids]
    selected_nn_trj = []
    for i in range(len(selected_nn_trj_complete)):
        selected_nn_trj.append(selected_nn_trj_complete[i][selected_nn_trj_idx[i]:])
    
    selected_nn_trj_linear = np.concatenate(selected_nn_trj, axis=0)
    print("shape of selected_nn_trj_linear: ", selected_nn_trj_linear.shape)

    # Project the trajectories onto the policy-ring PCA basis (fitted in
    # load_policy_ring_pca) rather than a locally-fitted one.
    # pca = PCA()
    # pca.fit(selected_nn_trj_linear)
    selected_nn_trj_pca = []
    for i in range(len(selected_nn_trj)):
        # selected_nn_trj_pca.append(pca.transform(selected_nn_trj[i]))
        selected_nn_trj_pca.append(PR_pca.transform(selected_nn_trj[i]))

    for task_id in range(len(selected_nn_trj_pca)):
        # 3D scatter of the first three PCA components.
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        # Background: the limit-ring centers in PCA space.
        ax.scatter(rnn_limit_rings_of_best_estimation_centers_flat_pca[:,0], 
                rnn_limit_rings_of_best_estimation_centers_flat_pca[:,1], 
                rnn_limit_rings_of_best_estimation_centers_flat_pca[:,2], c='r', marker='o', alpha=0.2)

        # Highlight the current trajectory (despite the name, rnd_inx is the
        # loop's task_id, not a random pick).
        rnd_inx = task_id

        print("len of selected_nn_trj_pca: ", len(selected_nn_trj_pca))
        print("len of new_trial_len_record: ", len(new_trial_len_record))
        print("rnd_inx: ", rnd_inx)
        print(new_trial_len_record[rnd_inx])
        ax.scatter(selected_nn_trj_pca[rnd_inx][:,0], selected_nn_trj_pca[rnd_inx][:,1], selected_nn_trj_pca[rnd_inx][:,2], s=1)

        # Draw the highlighted trajectory as a line with a color gradient
        # along time (viridis: dark = early, bright = late).
        for i in range(len(selected_nn_trj_pca[rnd_inx])-1):
            ax.plot(selected_nn_trj_pca[rnd_inx][i:i+2,0], selected_nn_trj_pca[rnd_inx][i:i+2,1], selected_nn_trj_pca[rnd_inx][i:i+2,2], color=plt.cm.viridis(i/len(selected_nn_trj_pca[rnd_inx])))

        plt.show()
    
    
# Script entry point.
if __name__ == "__main__":
    main()