from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

# analysis of phase space

def progress_bar(current, total, barLength = 100):
    """Print an in-place textual progress bar (carriage-return style).

    Args:
        current: number of completed steps.
        total: total number of steps.
        barLength: width of the bar in characters.
    """
    percent = float(current) * 100 / total
    filled = int(percent / 100 * barLength - 1)
    arrow = '-' * filled + '>'
    padding = ' ' * (barLength - len(arrow))
    print('Progress: [%s%s] %d %%' % (arrow, padding, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """JIT-compiled forward pass of the model.

    Args:
        variables: parameter pytree as loaded from a checkpoint.
        state: recurrent hidden state batch.
        x: batched observation input.
        model: the module object; marked static so jax can trace `apply`.

    Returns:
        The result of ``model.apply`` — callers unpack it as
        ``(new_state, output)``.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest output logit."""
    logits = y
    return jnp.argmax(logits)
# Batched version: one argmax per row of the output batch.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Read a task description (landscape grid, start state, goal) from JSON.

    Args:
        pth: path to the task JSON file.
        display: when True, echo the loaded fields to stdout.

    Returns:
        (landscape, state, goal) as stored in the file.
    """
    with open(pth, "r") as fh:
        payload = json.load(fh)
    landscape = payload["data"]
    state = payload["state"]
    goal = payload["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal

# Global state shared between the main thread and the image-display thread.
class imgview:
    # Container for data exchanged with the image-display thread
    # (used as a module-level singleton, see imgview_data below).
    global_image = None   # latest frame to display (numpy image), or None if none yet
    imgview_exit = False  # set to True to terminate the display loop
    trajectory = []       # recorded agent states; [0]/[1] are the grid coordinates
    focus_i = 0           # user-adjustable index (changed by 'a'/'d' keys in show_image)
    traj_i = 0            # index into `trajectory` of the position being drawn

imgview_data = imgview()

# Define a function that displays images in a separate thread.
def show_image():
    """Image-display loop intended to run in its own thread.

    Polls ``imgview_data`` for the latest frame, overlays a red dot at the
    current trajectory position, and lets the user step ``focus_i`` with the
    'a'/'d' keys. Exits when ``imgview_data.imgview_exit`` becomes True.

    NOTE(review): the key handlers change ``focus_i`` but the drawn marker
    uses ``traj_i`` — confirm some other thread links the two.
    """
    # Pixel size of one grid cell in the rendered image — TODO confirm it
    # matches the renderer that produces global_image.
    grid_size_display = 20
    while not imgview_data.imgview_exit:
        # Check whether a frame has been published yet.
        if imgview_data.global_image is not None:
            img = np.copy(imgview_data.global_image)
            state_x = imgview_data.trajectory[imgview_data.traj_i][0]
            state_y = imgview_data.trajectory[imgview_data.traj_i][1]
            # Draw the agent position (grid row/col mapped to pixel y/x).
            cv2.circle(img, (state_y * grid_size_display + int(grid_size_display/2), state_x * grid_size_display + int(grid_size_display/2)), 7, (0, 0, 255), -1, cv2.LINE_AA)
            # Show the frame.
            cv2.imshow("Image", img)
            key = cv2.waitKey(1)
            if key == ord('a'):
                imgview_data.focus_i -= 1
                print("imgview_data.focus_i: ", imgview_data.focus_i)
            elif key == ord('d'):
                imgview_data.focus_i += 1
                print("imgview_data.focus_i: ", imgview_data.focus_i)
        else:
            # No frame yet — wait 100 ms before polling again.
            time.sleep(0.1)

def get_intrinsic_pc():

    """Characterise the RNN's intrinsic (zero-input) dynamics.

    Rolls a batch of randomly initialised hidden states forward under an
    all-zero observation for ``life_duration`` steps, fits a PCA on the
    visited hidden states, and k-means-clusters the final states into 4
    groups.

    Returns:
        (fitted sklearn PCA, k-means cluster centres in PCA coordinates).
    """
    rpl_config = ReplayConfig()

    # CLI flags mirror (and default to) the fields of ReplayConfig.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--start_i", type=int, default=rpl_config.start_i)
    parser.add_argument("--end_i", type=int, default=rpl_config.end_i)

    args = parser.parse_args()

    # Copy the parsed values back onto the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    rpl_config.start_i = args.start_i
    rpl_config.end_i = args.end_i

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    """ create agent
    """
    # NOTE(review): an nn_type other than "vanilla"/"gru" leaves `model` unbound.
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # (Dense input = nn_size hidden units concatenated with a 10-dim observation)
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    # Batch of randomly initialised hidden states.
    n_samples = 1000
    k1 = npr.randint(0, 1000000)
    rnn_state = model.initial_state_rnd(n_samples, k1)
    # Running statistics of the per-sample state change between steps
    # (all zero at this point; updated inside the loop below).
    rnn_state_old = rnn_state.copy()
    diff = jnp.abs(rnn_state - rnn_state_old)
    rnn_state_old = rnn_state.copy()
    diff_norm = jnp.linalg.norm(diff, axis=1)
    diff_norm_old = diff_norm.copy()
    norm_std = diff_norm.copy()

    rnn_state_init = rnn_state.copy()

    # All-zero observation: probes the autonomous (input-free) dynamics.
    obs_zero = jnp.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0] for i in range(n_samples)])

    rnn_state_trajectory = []

    for t in range(rpl_config.life_duration):

        # NOTE(review): probe_point comes from ReplayConfig defaults; it is
        # not exposed as a CLI argument above.
        if t == rpl_config.probe_point:
            rnn_state_init = rnn_state.copy()

        progress_bar(t, rpl_config.life_duration)

        """ model forward 
        """
        rnn_state, y1 = model_forward(params, rnn_state, obs_zero, model)
        diff = jnp.abs(rnn_state - rnn_state_old)
        rnn_state_old = rnn_state.copy()
        diff_norm = jnp.linalg.norm(diff, axis=1)
        # Exponential moving average of the step-to-step change of diff_norm.
        norm_std = 0.4 * jnp.abs(diff_norm - diff_norm_old) + 0.6 * norm_std
        diff_norm_old = diff_norm.copy()

        rnn_state_trajectory.append(np.array(rnn_state).copy())

    print(rnn_state.shape)
    print(norm_std.shape)
    rnn_state_np = np.array(rnn_state)

    # Flatten rnn_state_trajectory to the shape of rnn_state_np:
    # (T, n_samples, hidden) -> (T * n_samples, hidden).
    rnn_state_trajectory_np = np.array(rnn_state_trajectory)
    rnn_state_trajectory_np = rnn_state_trajectory_np.reshape(-1, rnn_state_trajectory_np.shape[-1])
    print("shape of rnn_state_trajectory_np: ", rnn_state_trajectory_np.shape)

    # Fit PCA on the full cloud of visited hidden states.
    pca = PCA()
    # pca.fit(rnn_state_np)
    pca.fit(rnn_state_trajectory_np)

    # Print the explained variance ratio.
    print(pca.explained_variance_ratio_)

    rnn_state_np_pca = pca.transform(rnn_state_np)

    # Cluster the final hidden states (in PCA coordinates) into 4 groups.
    kmeans = KMeans(n_clusters=4)
    kmeans.fit(rnn_state_np_pca)
    # Coordinates of the cluster centres, in PCA space.
    cluster_centers = kmeans.cluster_centers_

    return pca, cluster_centers

def rnn_run(x, mat_intr, bias1):
    """One intrinsic RNN step (numpy): tanh(x @ mat_intr + bias1)."""
    pre_activation = np.dot(x, mat_intr) + bias1
    return np.tanh(pre_activation)

# Compute the Jacobian matrix of the intrinsic vector field.
@jax.jit
def rnn_run_vector(x, mat_intr, bias1):
    """Intrinsic vector field: f(x) = tanh(x @ mat_intr + bias1) - x."""
    next_state = jnp.tanh(jnp.dot(x, mat_intr) + bias1)
    return next_state - x
# Jacobian of the vector field w.r.t. x, plus a jitted batched version.
jacobian_fun = jax.jacrev(rnn_run_vector)
jacobian_vmap = jax.jit(jax.vmap(jacobian_fun, in_axes=(0,None,None)))

@jax.jit
def vector_field_taylor_expansion(x, x0, jacobian, mat_intr, bias1):
    """First-order Taylor approximation of the vector field around x0."""
    linear_term = jnp.dot(jacobian, x - x0)
    return linear_term + rnn_run_vector(x0, mat_intr, bias1)
vector_field_taylor_expansion_vmap = jax.jit(jax.vmap(vector_field_taylor_expansion, in_axes=(0,0,None,None,None)))

# Run the linear dynamical system using vector_field_taylor_expansion.
def run_LDS_one_step(x, x0, dt, jacobian, mat_intr, bias1):
    """Advance the linearised dynamics one explicit-Euler step of size dt."""
    velocity = vector_field_taylor_expansion(x, x0, jacobian, mat_intr, bias1)
    stepped = x + velocity * dt
    return stepped.copy()

def compute_linearized_err_1X(sample_point, mat_intr, bias1):
    """Measure how well the local linearisation reproduces one true RNN step.

    Euler-integrates the Taylor-expanded vector field from ``sample_point``
    until the accumulated length reaches the magnitude of the true one-step
    vector field, then compares the end state against the actual next state.

    Returns:
        (L2 distance between true next state and linearised end state,
         Pearson correlation between the two state vectors).
    """

    jacobian_of_x = jacobian_fun(jnp.array(sample_point), mat_intr, bias1)

    # Magnitude of the true vector field at the sample point; used as the
    # stopping threshold for the integration below.
    sample_vector = rnn_run_vector(sample_point, mat_intr, bias1)
    sample_vector_len = np.linalg.norm(sample_vector)

    initial_state = sample_point
    current_state = initial_state.copy()
    current_state0 = current_state.copy()
    traj_len = 0

    # True next hidden state after one full (non-linearised) RNN step.
    next_state = rnn_run(initial_state, mat_intr, bias1)

    while traj_len < sample_vector_len:
        current_state0 = current_state.copy()
        current_state = run_LDS_one_step(current_state, initial_state, 0.01, jacobian_of_x, mat_intr, bias1)
        # NOTE(review): delta is measured from the *initial* state, so traj_len
        # accumulates distance-from-start each iteration rather than arc
        # length — confirm this is the intended stopping criterion.
        delta_state = current_state - initial_state
        delta_state_len = np.linalg.norm(delta_state)
        traj_len += delta_state_len

    # Compare against the state just before the threshold was crossed.
    diff_next_state_current_state = np.linalg.norm(next_state - current_state0)

    return diff_next_state_current_state, np.corrcoef(next_state, current_state0)[0, 1]

def compute_eig(sample_point, mat_intr, bias1):
    """Eigen-analysis of the local Jacobian at sample_point.

    Returns:
        (is_contracting, eigenvalues) — is_contracting is True when no
        eigenvalue has a positive real part.
    """
    local_jacobian = jacobian_fun(jnp.array(sample_point), mat_intr, bias1)
    eigenvalues = np.linalg.eigvals(local_jacobian)
    is_contracting = not any(np.real(eigenvalues) > 0)
    return is_contracting, eigenvalues


def compute_limit_point(sample_point, mat_intr, bias1):
    """Approximate the limit point of the dynamics linearised at sample_point.

    Integrates the first-order (Taylor) vector field for 1000 Euler steps of
    size 0.01 and returns the final state.
    """
    local_jacobian = jacobian_fun(jnp.array(sample_point), mat_intr, bias1)

    state = sample_point.copy()
    for _ in range(1000):
        state = run_LDS_one_step(state, sample_point, 0.01, local_jacobian, mat_intr, bias1)

    return state

def main():

    """Replay one task with the trained agent and visualise the hidden-state
    trajectory, the intrinsic vector field (IVF, red) and the
    input-perturbation field (IPF, green) in the PCA space of the intrinsic
    dynamics.
    """

    # PCA basis and k-means centres of the zero-input (intrinsic) dynamics.
    intrinsic_pca, cluster_centers = get_intrinsic_pc()
    # print(intrinsic_pca.explained_variance_ratio_)

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    # CLI flags mirror (and default to) the fields of ReplayConfig.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--start_i", type=int, default=rpl_config.start_i)
    parser.add_argument("--end_i", type=int, default=rpl_config.end_i)

    args = parser.parse_args()

    # Copy the parsed values back onto the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    rpl_config.start_i = args.start_i
    rpl_config.end_i = args.end_i

    # Single task: one landscape, one start state, one goal.
    landscapes, states, goals = [], [], []

    landscape_, state_, goal_ = load_task(rpl_config.task_pth, display=False)

    landscapes.append(landscape_)
    states.append(state_)
    goals.append(goal_)

    states = jnp.array(states)
    goals = jnp.array(goals)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)

    # Dense-layer parameters: bias and the full kernel (nn_size + 10 rows).
    bias1 = np.array(tree_leaves[0])
    mat1 = np.array(tree_leaves[1])
    print("mat1.shape: ", mat1.shape)

    # Split the kernel into observation rows and recurrent (intrinsic) rows.
    mat_obs = np.array(tree_leaves[1])[rpl_config.nn_size:rpl_config.nn_size+10,:]
    mat_intr = np.array(tree_leaves[1])[0:rpl_config.nn_size,:]
    print("mat_obs.shape: ", mat_obs.shape)

    
    """ create agent
    """
    # NOTE(review): an nn_type other than "vanilla"/"gru" leaves `model` unbound.
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()

    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs

    print("shape of concat_obs: ", concat_obs[0].shape)

    # Per-step records for environment 0.
    rnn_state = model.initial_state(GE.num_envs)
    step_count = 0
    trajectories = []   # grid states
    goal_record = []    # goal-reached flags
    HS_trajectory = []  # hidden states
    IPFs = []           # input-perturbation field samples
    intr_field = []     # intrinsic vector field samples

    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        step_count += 1

        # Concatenate rnn_state[0] and concat_obs[0] into the Dense input vector.
        new_vector = np.concatenate((rnn_state[0], concat_obs[0]))
        # Full next hidden state: tanh((state ++ obs) @ mat1 + bias1).
        result_vector = np.dot(new_vector, mat1) + bias1
        result_vector = np.tanh(result_vector)
        # Intrinsic next state: the same update with the observation removed.
        intr_vector = np.dot(rnn_state[0], mat_intr) + bias1
        intr_vector = np.tanh(intr_vector)

        # IPF: contribution of the observation to the state update.
        IPF = result_vector - intr_vector
        IPFs.append(IPF)
        # Intrinsic field: displacement the zero-input dynamics would produce.
        intr_field.append(intr_vector - rnn_state[0])

        # Record *before* stepping, so entry t describes the state at time t.
        trajectories.append(np.array(GE.batched_states[0]))
        HS_trajectory.append(np.array(rnn_state[0]))
        goal_record.append(GE.batched_goal_reached[0])

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

    print("shape of trajectories: ", np.array(trajectories).shape)
    print("shape of goal_record: ", np.array(goal_record).shape)
    print("shape of rnn_state: ", rnn_state.shape)
    print("shape of HS_trajectory: ", np.array(HS_trajectory).shape)

    HS_trajectory_np = np.array(HS_trajectory)

    # Project into the intrinsic PCA space. The difference fields get the PCA
    # mean added back so transform()'s mean subtraction cancels out.
    HS_pca = intrinsic_pca.transform(np.array(HS_trajectory))
    IPF_pca = intrinsic_pca.transform(np.array(IPFs) + intrinsic_pca.mean_)
    intr_pca = intrinsic_pca.transform(np.array(intr_field) + intrinsic_pca.mean_)

    # Split HS_pca into segments, cutting after each step where the goal was reached.
    HS_pca_split = []
    split_start = 0
    for i in range(len(goal_record)):
        if goal_record[i] == 1:
            HS_pca_split.append(HS_pca[split_start:i+1])
            split_start = i+1
    HS_pca_split.append(HS_pca[split_start:])

    for s in HS_pca_split:
        print("shape of s: ", s.shape)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # Plot 5 consecutive goal-to-goal segments starting at start_i.
    piece_start = rpl_config.start_i
    piece_end = piece_start + 5

    end_point_collection = []
    start_point_collection = []

    eigenvalues_real_sums = []

    for piece_id in range(piece_start, piece_end):

        # Absolute index range of this segment inside HS_pca.
        start_i = 0
        for k in range(piece_id):
            start_i += HS_pca_split[k].shape[0]
        
        end_i = HS_pca_split[piece_id].shape[0] + start_i

        end_point_collection.append(HS_pca_split[piece_id][-1])
        start_point_collection.append(HS_pca_split[piece_id][0])

        print("start_i: ", start_i)
        print("end_i: ", end_i)
        print("---------------------------")
            
        # Inspect the points in [start_i:end_i] in detail:
        # 1. the intrinsic vector field at each point
        # 2. the observation-perturbation vector at each point

        # Colour each point by whether its local linearisation is contracting
        # (green: no eigenvalue with positive real part; red: otherwise).
        HS_trajectory_np_selected = HS_trajectory_np[start_i:end_i]
        color_convergence = []
        for k in range(HS_trajectory_np_selected.shape[0]):
            contraction, eigenvalues = compute_eig(HS_trajectory_np_selected[k], mat_intr, bias1)
            # print("contraction: ", contraction)
            if contraction:
                color_convergence.append('g')
            else:
                color_convergence.append('r')

            # NOTE(review): despite the name, this is the *count* of eigenvalues
            # with positive real part, not a sum of their real parts — the
            # y-label of the final plot says otherwise; confirm intent.
            eigenvalues_real_sum = np.sum(np.real(eigenvalues) > 0)
            eigenvalues_real_sums.append(eigenvalues_real_sum)
        
        # One colour value per point (only used by the commented-out plot style).
        colors = np.linspace(0, 1, len(HS_pca))
        ax.scatter(HS_pca[start_i:end_i, 0], HS_pca[start_i:end_i, 1], HS_pca[start_i:end_i, 2], c=color_convergence, s=50)
        # Draw the 3D polyline; later segments are drawn more opaque.
        for i in range(start_i, end_i - 1):
            # ax.plot([HS_pca[i, 0], HS_pca[i+1, 0]], [HS_pca[i, 1], HS_pca[i+1, 1]], [HS_pca[i, 2], HS_pca[i+1, 2]], color=plt.cm.RdYlBu(colors[i]), alpha=0.3)
            ax.plot([HS_pca[i, 0], HS_pca[i+1, 0]], [HS_pca[i, 1], HS_pca[i+1, 1]], [HS_pca[i, 2], HS_pca[i+1, 2]], color='b', alpha=(piece_id-piece_start)/(piece_end-piece_start)+0.05)
        # Arrows: green = IPF (input perturbation), red = intrinsic field.
        for i in range(start_i, end_i):
            ax.quiver(HS_pca[i, 0], HS_pca[i, 1], HS_pca[i, 2], IPF_pca[i, 0], IPF_pca[i, 1], IPF_pca[i, 2], color='g', length=0.2, arrow_length_ratio=0.3)
            ax.quiver(HS_pca[i, 0], HS_pca[i, 1], HS_pca[i, 2], intr_pca[i, 0], intr_pca[i, 1], intr_pca[i, 2], color='r', length=0.2, arrow_length_ratio=0.3)

        # Find the k-means cluster centre closest to this segment's centroid.
        # Centroid of HS_pca[start_i:end_i]:
        HS_pca_center = np.mean(HS_pca[start_i:end_i], axis=0)
        # Nearest cluster centre:
        nearest_center_index = np.argmin([np.linalg.norm(HS_pca_center - center) for center in cluster_centers])
        nearest_center = cluster_centers[nearest_center_index]
        ax.scatter(nearest_center[0], nearest_center[1], nearest_center[2], c='b', s=140)

    # Legend entries (empty artists, used only for their labels).
    converged_legend = ax.scatter([], [], marker='o', color='green', label='points with Convergent Local LDS')
    diverged_legend = ax.scatter([], [], marker='o', color='red', label='points with Divergent Local LDS')

    handles_=[converged_legend, diverged_legend]
    for piece_id in range(piece_start, piece_end):
        _line = Line2D([0], [0], color='b', alpha=(piece_id-piece_start)/(piece_end-piece_start)+0.05, label = "Loop " + str(piece_id))
        handles_.append(_line)

    quiver_1 = ax.quiver([], [], [], [], [], [], color='g', length=0.2, arrow_length_ratio=0.5, label='IPF')
    quiver_2 = ax.quiver([], [], [], [], [], [], color='r', length=0.2, arrow_length_ratio=0.5, label='IVF')
    handles_.append(quiver_1)
    handles_.append(quiver_2)

    # Mark segment end points with 'O' and start points with 'X'.
    for end_point in end_point_collection:
        ax.text(end_point[0], end_point[1], end_point[2], 'O', color='black', fontsize=2.5 * plt.rcParams['font.size'])
    for start_point in start_point_collection:
        ax.text(start_point[0], start_point[1], start_point[2], 'X', color='black', fontsize=2.5 * plt.rcParams['font.size'])

    # Add the legend.
    ax.legend(handles=handles_, bbox_to_anchor=(1.5, 0.5))

    plt.show()

    # Plot eigenvalues_real_sums over time.
    plt.plot(eigenvalues_real_sums)
    # Label the axes.
    plt.ylabel("sum of positive real parts of eigen values")
    plt.xlabel("steps")
    plt.show()
    

# Script entry point.
if __name__ == "__main__":
    main()