from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from flax import linen as nn

from jax import tree_util

def progress_bar(current, total, barLength=100):
    """Render a one-line, carriage-return progress bar on stdout.

    current/total are step counts; barLength is the bar width in characters.
    """
    percent = current * 100.0 / total
    filled = '-' * int(percent / 100 * barLength - 1) + '>'
    padding = ' ' * (barLength - len(filled))
    print('Progress: [%s%s] %d %%' % (filled, padding, percent), end='\r')
    sys.stdout.flush()

def _model_forward_impl(variables, state, x, model):
    """Forward pass of `model` given parameters, recurrent state, and input x."""
    return model.apply(variables, state, x)

# JIT-compile with the model object treated as a static (hashable) argument.
model_forward = jax.jit(_model_forward_impl, static_argnums=(3,))

@jax.jit
def get_action(y):
    """Greedy action selection: index of the largest logit in y."""
    return jnp.argmax(y)

# Batched variant: one argmax per row of a (batch, n_actions) array.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    # open json file
    with open(pth, "r") as f:
        data = json.load(f)
        landscape = data["data"]
        state = data["state"]
        goal = data["goal"]
        if display:
            print("state: ", state)
            print("goal: ", goal)
            print("landscape: ", landscape)
    return landscape, state, goal

def qc_single_goal_record(record, log=False, test_steps=1000):
    """Quality-check a single env's goal-reached record.

    Walks the boolean `record` (one entry per step; True = goal reached),
    measuring the length of each trial (steps between goal hits) and an
    exponential moving average of successive trial-length differences.
    The record passes when the *last* goal hit satisfies all of:
    EMA <= 1 (trial lengths have stabilized), it occurs in the final
    quarter of `test_steps`, and the trial took <= 20 steps.

    Returns (True, ema) on pass, (False, 100000) on failure.
    """
    var_threshold = 1

    trial_len_old = 0
    steps_since_goal = 0
    trial_len_var = 0
    qc_pass = False
    # Idiomatic iteration (was: index loop with `record[i] == True`);
    # dead `first_trial_len` bookkeeping removed — it was never returned.
    for i, reached in enumerate(record):
        steps_since_goal += 1
        if reached:
            trial_len = steps_since_goal
            if log:
                print("trial_len: ", trial_len)
            steps_since_goal = 0
            # EMA of the change in trial length (a variability proxy).
            trial_len_var = 0.9 * abs(trial_len - trial_len_old) + 0.1 * trial_len_var
            trial_len_old = trial_len
            # Verdict is re-evaluated at every goal hit; the last one wins.
            qc_pass = (trial_len_var <= var_threshold
                       and i >= 3 * test_steps / 4
                       and trial_len <= 20)
    if qc_pass:
        return True, trial_len_var
    return False, 100000

# Compute the Jacobian matrix (parameter layout reference below)
"""
W_ir = params["params"]["GRUCell_0"]["ir"]["kernel"]
W_iz = params["params"]["GRUCell_0"]["iz"]["kernel"]
W_in = params["params"]["GRUCell_0"]["in"]["kernel"]
W_hr = params["params"]["GRUCell_0"]["hr"]["kernel"]
W_hz = params["params"]["GRUCell_0"]["hz"]["kernel"]
W_hn = params["params"]["GRUCell_0"]["hn"]["kernel"]
b_in = params["params"]["GRUCell_0"]["in"]["bias"]
b_hr = params["params"]["GRUCell_0"]["hr"]["bias"]
b_hz = params["params"]["GRUCell_0"]["hz"]["bias"]
b_hn = params["params"]["GRUCell_0"]["hn"]["bias"]
"""
def gru_step(x, h, W_ir, W_iz, W_in, W_hr, W_hz, W_hn, b_in, b_hn):
    """One GRU cell update: returns the new hidden state h'.

    r  = sigmoid(x W_ir + h W_hr)                 (reset gate)
    z  = sigmoid(x W_iz + h W_hz)                 (update gate)
    n  = tanh(x W_in + b_in + r * (h W_hn + b_hn)) (candidate state)
    h' = (1 - z) * n + z * h

    NOTE(review): the reset/update gates take no bias terms (no b_hr/b_hz
    in the signature) — presumably matching the trained checkpoint's
    parameter set; confirm against the flax GRUCell parameters.
    """
    reset = jax.nn.sigmoid(x @ W_ir + h @ W_hr)
    update = jax.nn.sigmoid(x @ W_iz + h @ W_hz)
    candidate = jnp.tanh(x @ W_in + b_in + reset * (h @ W_hn + b_hn))
    return (1 - update) * candidate + update * h

@jax.jit
def rnn_run_vector(state, obs, W_ir, W_iz, W_in, W_hr, W_hz, W_hn, b_in, b_hn):
    """One-step hidden-state change h' - h for a fixed observation.

    A zero return value means `state` is a fixed point of the dynamics.
    """
    next_state = gru_step(obs, state, W_ir, W_iz, W_in, W_hr, W_hz, W_hn, b_in, b_hn)
    return next_state - state

# Jacobian of the state delta w.r.t. the hidden state (argument 0).
jacobian_fun = jax.jacrev(rnn_run_vector)
# Map over a batch of hidden states; observation and weights are shared.
jacobian_fun_batch = jax.vmap(jacobian_fun, in_axes=(0,) + (None,) * 9)

def compute_eig(sample_point, W_ir, W_iz, W_in, W_hr, W_hz, W_hn, b_in, b_hr, b_hz, b_hn):
    """Return True when the GRU dynamics linearized at `sample_point`
    (under an all-zero observation) are locally stable, i.e. no Jacobian
    eigenvalue has a positive real part; False otherwise.

    b_hr and b_hz are kept in the signature for caller compatibility but
    are unused: gru_step/rnn_run_vector model the gates without them.
    """
    obs = jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    # BUG FIX: jacobian_fun wraps rnn_run_vector, which takes exactly
    # (state, obs, six kernels, b_in, b_hn). The previous call passed
    # b_hr and b_hz as well (12 args), raising a TypeError.
    jacobian_of_x = jacobian_fun(jnp.array(sample_point), obs, W_ir, W_iz, W_in, W_hr, W_hz, W_hn, b_in, b_hn)
    eigenvalues = np.linalg.eigvals(jacobian_of_x)
    # np.any instead of builtin any(): explicit elementwise reduction.
    return not bool(np.any(np.real(eigenvalues) > 0))
    
@jax.jit
def check_diverge(eigenvalues):
    """True when any eigenvalue has a positive real part (locally unstable)."""
    real_parts = jnp.real(eigenvalues)
    return (real_parts > 0).any()

# One divergence verdict per spectrum in a batch.
check_diverge_batch = jax.vmap(check_diverge)

def compute_eig_batch(sample_points, W_ir, W_iz, W_in, W_hr, W_hz, W_hn, b_in, b_hn):
    """Classify each hidden-state sample as diverging (True) or not (False).

    Linearizes the GRU at every sample point under an all-zero observation,
    then checks each Jacobian's eigenvalues for a positive real part.
    """
    obs = jnp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    jacobians = jacobian_fun_batch(sample_points, obs, W_ir, W_iz, W_in, W_hr, W_hz, W_hn, b_in, b_hn)
    spectra = np.linalg.eigvals(jacobians)

    # Batched count of which samples have diverging local dynamics.
    return check_diverge_batch(spectra)


def main():
    """Replay a trained recurrent agent on the tasks that previously passed
    QC, re-run the QC check, then linearize the GRU around late-episode
    hidden states and report the fraction with locally diverging dynamics
    (a Jacobian eigenvalue whose real part is positive).
    """

    # NOTE(review): seq_len is never used below — confirm before removing.
    seq_len = 15

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    # CLI flags default to the ReplayConfig values, so every option can be
    # overridden from the command line without editing the config.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    # Write the parsed values back onto the config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # Architecture tag used to pick the matching QC-pass task list.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    """ load task
    """
    # Load every task file named in the per-architecture QC-pass list.
    landscapes, states, goals = [], [], []
    rf_task_file = "./data/rf_pass_task_"+nn_type+".txt"
    rf_task_list = []
    for line in open(rf_task_file):
        rf_task_list.append(line.strip())
    print("len of tf_task_list: ", len(rf_task_list))
    dir_path = "./data/adaptive_trajectory_optimization/task_envs_rnd/"
    for tt in rf_task_list:
        # get complete path
        task_pth = dir_path + str(tt)
        landscape, state, goal = load_task(task_pth, display=False)

        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    num_envs = len(landscapes)

    states = jnp.array(states)
    goals = jnp.array(goals)

    print("shape of states: ", states.shape)
    print("shape of goals: ", goals.shape)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)
    
    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    # get elements of params
    tree_leaves = jax.tree_util.tree_leaves(params)
    for i in range(len(tree_leaves)):
        print("shape of leaf ", i, ": ", tree_leaves[i].shape)
    
    """
    r = \sigma(W_{ir} x + W_{hr} h + b_{hr})
    z = \sigma(W_{iz} x + W_{hz} h + b_{hz})
    n = \tanh(W_{in} x + b_{in} + r * (W_{hn} h + b_{hn}))
    h' = (1 - z) * n + z * h
    """
    # print(params["params"]["GRUCell_0"]["hn"]["kernel"].shape)
    # Extract the GRU cell weights used by the Jacobian analysis.
    W_ir = params["params"]["GRUCell_0"]["ir"]["kernel"]
    W_iz = params["params"]["GRUCell_0"]["iz"]["kernel"]
    W_in = params["params"]["GRUCell_0"]["in"]["kernel"]
    W_hr = params["params"]["GRUCell_0"]["hr"]["kernel"]
    W_hz = params["params"]["GRUCell_0"]["hz"]["kernel"]
    W_hn = params["params"]["GRUCell_0"]["hn"]["kernel"]
    b_in = params["params"]["GRUCell_0"]["in"]["bias"]
    b_hn = params["params"]["GRUCell_0"]["hn"]["bias"]
    # NOTE(review): hz/hr biases are aliased to b_hn — presumably those keys
    # are absent from the checkpoint; confirm. Both are unused downstream
    # (gru_step takes only b_in and b_hn).
    b_hz = b_hn #params["params"]["GRUCell_0"]["hz"]["bias"]
    b_hr = b_hn #params["params"]["GRUCell_0"]["hr"]["bias"]

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs
    
    # Per-step logs; each entry is one step across all envs.
    trajectories = []
    goal_record = []
    obs_record = []
    neural_states = []
    action_record = []

    rnn_state = model.initial_state(GE.num_envs)

    rkey = jax.random.PRNGKey(np.random.randint(0, 1000000))

    # Rollout loop: forward the model, step every env, log everything.
    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

        trajectories.append(np.array(GE.batched_states))
        obs_record.append(np.array(concat_obs))
        action_record.append(np.array(batched_actions))
        neural_states.append(np.array(rnn_state))
        goal_record.append(batched_goal_reached)

    trajectories = np.array(trajectories)
    obs_record = np.array(obs_record)
    action_record = np.array(action_record)
    neural_states = np.array(neural_states)

    # Swap to env-major layout: (num_envs, life_duration, ...).
    trajectories = np.swapaxes(trajectories, 0, 1)
    obs_record = np.swapaxes(obs_record, 0, 1)
    action_record = np.swapaxes(action_record, 0, 1)
    neural_states = np.swapaxes(neural_states, 0, 1)

    goal_record = np.array(goal_record).T

    print("shape of trajectories: ", trajectories.shape)
    print("shape of obs_record: ", obs_record.shape)
    print("shape of action_record: ", action_record.shape)
    print("shape of neural_states: ", neural_states.shape)
    print("shape of goal_record: ", goal_record.shape)

    # Re-run the QC check per env on the replayed goal records.
    n_qc_pass = 0
    qc_pass = []
    qc_fail = []
    trial_len_vars = []
    for i in range(goal_record.shape[0]):
        progress_bar(i, goal_record.shape[0])
        qc_true, trial_len_var = qc_single_goal_record(goal_record[i], log = False, test_steps = rpl_config.life_duration)
        if qc_true:
            n_qc_pass += 1
            qc_pass.append(i)
            trial_len_vars.append(trial_len_var)
        else:
            qc_fail.append(i)
    
    print("shape of qc_pass: ", np.array(qc_pass).shape)
    print("shape of qc_fail: ", np.array(qc_fail).shape)
    print("qc ratio: ", n_qc_pass / goal_record.shape[0])

    # Take the hidden states from the last 5 steps of every env and
    # flatten to (num_envs * 5, nn_size) for the batched stability check.
    sample_neural_states = neural_states[:,-5:,:]
    sample_neural_states_flat = sample_neural_states.reshape(-1, sample_neural_states.shape[-1])

    # Compute LDS (linear dynamical system) convergence for each entry of
    # last_goals_neural_states, one at a time (kept for reference).
    # for i in range(last_goals_neural_states.shape[0]):
    #     progress_bar(i, last_goals_neural_states.shape[0])
    #     if compute_eig(last_goals_neural_states[i], W_ir, W_iz, W_in, W_hr, W_hz, W_hn, b_in, b_hr, b_hz, b_hn):
    #         print("converge")
    #     else:
    #         print("diverge")
    
    diverge_batch = compute_eig_batch(sample_neural_states_flat, W_ir, W_iz, W_in, W_hr, W_hz, W_hn, b_in, b_hn)
    
    # Count how many entries of diverge_batch are True vs. False.
    n_diverge = jnp.sum(diverge_batch)

    print("n_diverge ratio: ", n_diverge / diverge_batch.shape[0])


# Script entry point.
if __name__ == "__main__":
    main()