from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Render a one-line, in-place console progress bar.

    Args:
        current: number of completed steps.
        total: total number of steps (must be non-zero).
        barLength: width of the bar body in characters.
    """
    percent = current * 100.0 / total
    arrow = '-' * int(percent / 100 * barLength - 1) + '>'
    spaces = ' ' * (barLength - len(arrow))

    # '\r' rewinds to the start of the line so successive calls overwrite
    # the bar in place; flush=True replaces the old sys.stdout.flush().
    print(f'Progress: [{arrow}{spaces}] {int(percent)} %', end='\r', flush=True)

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """ forward pass of the model

    JIT-compiled; ``model`` (argument 3) is marked static, so it must be
    hashable and each distinct model object triggers a fresh compilation.

    Args:
        variables: model parameter pytree, passed straight to ``model.apply``.
        state: recurrent state carried between consecutive calls.
        x: batched observation input for this step.
        model: Flax-style module exposing ``apply(variables, state, x)``.

    Returns:
        Whatever ``model.apply`` returns — at the call sites in ``main``
        this is unpacked as ``(new_rnn_state, output_logits)``.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy policy: return the index of the largest logit in ``y``."""
    return y.argmax()
# Vectorized form: maps the greedy pick over a batch of logit vectors.
get_action_vmap = jax.vmap(get_action)

def load_task(pth = "./logs/task.json", display = True):
    """Load a task (landscape, start state, goal) from a JSON file.

    Args:
        pth: path to the task JSON file.
        display: when True, echo the loaded fields to stdout.

    Returns:
        Tuple ``(landscape, state, goal)`` exactly as stored in the file
        under the keys "data", "state" and "goal".
    """
    with open(pth, "r") as task_file:
        task = json.load(task_file)

    landscape = task["data"]
    state = task["state"]
    goal = task["goal"]

    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)

    return landscape, state, goal

def qc_single_goal_record(record, log = False, test_steps = 1000):
    """Quality-check a single environment's goal-reached record.

    Scans the boolean step record, measuring the number of steps between
    consecutive goal events ("trial length") and an exponential moving
    average of its absolute change. The record passes when, at the final
    goal event, that variability is <= 1, the event falls in the last
    quarter of the run, and the final trial length is at most 20 steps.

    Args:
        record: 1-D boolean array (numpy-like, with ``.shape``); True marks
            a step on which the goal was reached.
        log: if True, print each trial length as it is found.
        test_steps: total step count the record spans; used for the
            "last quarter" criterion.

    Returns:
        ``(True, trial_len_var)`` when the record passes QC, otherwise
        ``(False, 100000)`` — the large constant is a sentinel for
        "unbounded variability".
    """
    var_threshold = 1

    trial_len = 0
    trial_len_old = 0
    step_ = 0
    trial_len_var = 0
    qc_pass = False
    for i in range(record.shape[0]):
        step_ += 1
        if record[i]:
            trial_len = step_
            if log:
                print("trial_len: ", trial_len)
            step_ = 0
            # Exponential moving average of |change in trial length|;
            # heavily weights the most recent change (0.9 vs 0.1).
            trial_len_var = 0.9 * abs(trial_len - trial_len_old) + 0.1 * trial_len_var
            trial_len_old = trial_len
            # Only the state at the *last* goal event matters: each event
            # overwrites qc_pass, so earlier passes can be revoked.
            qc_pass = (
                trial_len_var <= var_threshold
                and i >= 3 * test_steps / 4
                and trial_len <= 20
            )
    if qc_pass:
        return True, trial_len_var
    return False, 100000


def main():
    """Compare the Lyapunov-exponent distribution of the trained recurrent
    agent against a randomly re-initialized copy of it.

    Pipeline:
      1. Load every QC-passing task file and roll the agent out for
         ``life_duration`` steps across all environments in parallel.
      2. Locate each environment's final goal-to-goal loop and record the
         position / RNN state / observation at its restart point.
      3. Resume rollouts from those snapshots twice — once with a small
         uniform perturbation of the RNN state, once without — and fit the
         slope of log(distance) between the paired trajectories to obtain
         one Lyapunov exponent per environment.
      4. Save histogram data (.npy) and comparison plots (.png / .svg)
         under ./logs/.
    """

    seq_len = 15  # NOTE(review): appears unused in this function — confirm before removing

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    # Every CLI flag defaults to the corresponding ReplayConfig value and
    # then overwrites it below.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration

    # Normalize the network-type string. NOTE(review): any other value
    # leaves nn_type == '' and no model is created below — confirm that
    # only "vanilla" and "gru" are expected.
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    """ load task
    """
    landscapes, states, goals = [], [], []
    rf_task_file = "./data/rf_pass_task_"+nn_type+".txt"
    rf_task_list = []
    # One task filename per line in the rf_pass list.
    for line in open(rf_task_file):
        rf_task_list.append(line.strip())
    # NOTE(review): "tf_task_list" in the message below is likely a typo
    # for "rf_task_list" (left unchanged: runtime string).
    print("len of tf_task_list: ", len(rf_task_list))
    dir_path = "./data/adaptive_trajectory_optimization/task_envs_rnd/"
    for tt in rf_task_list:
        # get complete path
        task_pth = dir_path + str(tt)
        landscape, state, goal = load_task(task_pth, display=False)

        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    num_envs = len(landscapes)

    states = jnp.array(states)
    goals = jnp.array(goals)

    print("shape of states: ", states.shape)
    print("shape of goals: ", goals.shape)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)
    
    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # Sanity check that the loaded weights match the agent: the first
    # Dense layer's input width must be hidden size + 10.
    # NOTE(review): presumably 10 is the observation feature count — confirm.
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()
    # NOTE(review): width/height are hard-coded to 12 and ignore
    # --map_size — confirm this is intentional.
    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs
    
    # Per-step rollout logs; collected time-major, transposed to
    # env-major after the loop.
    trajectories = []
    goal_record = []
    obs_record = []
    neural_states = []
    action_record = []

    rnn_state = model.initial_state(GE.num_envs)

    # PRNG key for the RNN-state perturbation used later in resume_traj.
    rkey = jax.random.PRNGKey(np.random.randint(0, 1000000))

    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

        trajectories.append(np.array(GE.batched_states))
        obs_record.append(np.array(concat_obs))
        action_record.append(np.array(batched_actions))
        neural_states.append(np.array(rnn_state))
        goal_record.append(batched_goal_reached)

    trajectories = np.array(trajectories)
    obs_record = np.array(obs_record)
    action_record = np.array(action_record)
    neural_states = np.array(neural_states)

    # Reorder from time-major (T, E, ...) to env-major (E, T, ...).
    trajectories = np.swapaxes(trajectories, 0, 1)
    obs_record = np.swapaxes(obs_record, 0, 1)
    action_record = np.swapaxes(action_record, 0, 1)
    neural_states = np.swapaxes(neural_states, 0, 1)

    # Same env-major reordering for the 2-D goal record.
    goal_record = np.array(goal_record).T

    print("shape of trajectories: ", trajectories.shape)
    print("shape of obs_record: ", obs_record.shape)
    print("shape of action_record: ", action_record.shape)
    print("shape of neural_states: ", neural_states.shape)
    print("shape of goal_record: ", goal_record.shape)

    # QC every environment's goal record; keep pass/fail indices.
    n_qc_pass = 0
    qc_pass = []
    qc_fail = []
    trial_len_vars = []
    for i in range(goal_record.shape[0]):
        progress_bar(i, goal_record.shape[0])
        qc_true, trial_len_var = qc_single_goal_record(goal_record[i], log = False, test_steps = rpl_config.life_duration)
        if qc_true:
            n_qc_pass += 1
            qc_pass.append(i)
            trial_len_vars.append(trial_len_var)
        else:
            qc_fail.append(i)
    
    print("shape of qc_pass: ", np.array(qc_pass).shape)
    print("shape of qc_fail: ", np.array(qc_fail).shape)
    print("qc ratio: ", n_qc_pass / goal_record.shape[0])

    # --- Segment each trajectory's final goal-to-goal loop ---
    """ 分割最终的循环路径
    """
    # A "final trajectory" is the segment from the step after the
    # second-to-last goal (inclusive) up to the last goal (inclusive).
    # Collect the final trajectories. NOTE(review): the comment in the
    # original says "all qc_pass", but the loop below runs over every
    # environment — confirm which is intended.
    trajectories_final_qc_pass = []
    # Reverse goal_record / trajectories along time so the *last* goal is
    # found first by a forward scan.
    goal_reached_reverse = goal_record[:, ::-1]
    trajectories_reverse = trajectories[:, ::-1]

    last_goals = []
    second_last_goals = []

    for i in range(goal_record.shape[0]):
        progress_bar(i, goal_record.shape[0])
        # Find the last goal and the one before it (indices in reversed time).
        last_goal = 0
        second_last_goal = 0
        for j in range(goal_reached_reverse.shape[1]):
            if goal_reached_reverse[i,j] == True:
                last_goal = j
                break
        for j in range(last_goal+1, goal_reached_reverse.shape[1]):
            if goal_reached_reverse[i,j] == True:
                second_last_goal = j
                break
        # Slice out the final loop and flip it back to forward time.
        final_traj_reverse = trajectories_reverse[i,last_goal:second_last_goal]
        final_traj = final_traj_reverse[::-1]
        trajectories_final_qc_pass.append(final_traj)

        last_goals.append(last_goal)
        second_last_goals.append(second_last_goal)

    last_goals = np.array(last_goals)
    second_last_goals = np.array(second_last_goals)

    print("shape of last_goals: ", last_goals.shape)
    print("shape of second_last_goals: ", second_last_goals.shape)

    # Record, for every env, the position (from trajectories), RNN state
    # (from neural_states) and observation at its restart point.
    # NOTE(review): despite the "last_goals_*" names these are indexed by
    # second_last_goals, i.e. the *start* of the final loop — confirm.
    # life_duration - j maps a reversed-time index j back to forward time;
    # NOTE(review): this lands one step *after* the goal event
    # (life_duration - 1 - j would be the event itself) — confirm intended.
    last_goals_positions = []
    last_goals_neural_states = []
    last_obs = []
    for i in range(len(second_last_goals)):
        last_goals_positions.append(trajectories[i,rpl_config.life_duration - second_last_goals[i]])
        last_goals_neural_states.append(neural_states[i,rpl_config.life_duration - second_last_goals[i]])
        last_obs.append(obs_record[i,rpl_config.life_duration - second_last_goals[i]])

    last_goals_positions = np.array(last_goals_positions)
    last_goals_neural_states = np.array(last_goals_neural_states)
    last_obs = np.array(last_obs)

    print("shape of last_goals_positions: ", last_goals_positions.shape)
    print("shape of last_goals_neural_states: ", last_goals_neural_states.shape)

    def resume_traj(last_goals_positions, last_goals_neural_states, last_obs, model, params, rkey, epsilon = 0.1):
        """Resume rollouts from each environment's recorded restart point.

        Restores the positions / observations / RNN states captured above
        (closes over GE, states, goals and num_envs from main), optionally
        perturbs the RNN state with uniform noise in [-epsilon, epsilon],
        then rolls out for 100 steps.

        Returns:
            (goal_record, trajectories, obs, actions, neural_states);
            goal_record is env-major, the other arrays are time-major.
        """
        
        # Reset GE back to the original task states / goals first.
        GE.reset()
        # set states of GE
        GE.batched_states = states.copy()
        # set goals of GE
        GE.batched_goals = goals.copy()
        GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
        GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
        GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)

        # Then overwrite positions and observations with the snapshots.
        GE.batched_states = jnp.array(last_goals_positions)
        GE.concat_obs = jnp.array(last_obs) #get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
        concat_obs = GE.concat_obs

        rnn_state = jnp.array(last_goals_neural_states)

        # Uniform noise of the same shape as rnn_state, each component in
        # [-epsilon, epsilon]. (Drawn even when epsilon == 0; only applied below.)
        noise = jax.random.uniform(rkey, shape = rnn_state.shape, minval = -epsilon, maxval = epsilon)

        if epsilon != 0:
            rnn_state0 = jnp.copy(rnn_state)
            rnn_state = rnn_state + noise
            print("diff of rnn_state: ", jnp.sum(jnp.abs(rnn_state - rnn_state0)))

        test_steps = 100
        # NOTE(review): rnd_env is only printed, never used — confirm.
        rnd_env = np.random.randint(0, num_envs)
        print("rnd_env: ", rnd_env)

        trajectories_1 = []
        goal_record_1 = []
        obs_record_1 = []
        neural_states_1 = []
        action_record_1 = []
        for t in range(test_steps):
            progress_bar(t, test_steps)

            """ model forward and step the env
            """
            rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
            batched_actions = get_action_vmap(y1)
            batched_goal_reached, concat_obs = GE.step(batched_actions, reset=True)

            trajectories_1.append(np.array(GE.batched_states))
            obs_record_1.append(np.array(concat_obs))
            action_record_1.append(np.array(batched_actions))
            neural_states_1.append(np.array(rnn_state))
            goal_record_1.append(batched_goal_reached)

        trajectories_1 = np.array(trajectories_1)
        obs_record_1 = np.array(obs_record_1)
        action_record_1 = np.array(action_record_1)
        neural_states_1 = np.array(neural_states_1)
        goal_record_1 = np.array(goal_record_1).T

        return goal_record_1, trajectories_1, obs_record_1, action_record_1, neural_states_1
    
    # Fresh PRNG key for building a randomly re-initialized parameter set.
    key = jax.random.PRNGKey(np.random.randint(0, 1000))
    def init_weights_r(key, shape):
        """Standard-normal sample of the given shape."""
        return jax.random.normal(key, shape)
    # NOTE(review): the same key is reused for every leaf, so equal-shaped
    # leaves get identical values; also jax.tree_map is deprecated in
    # newer JAX (jax.tree.map) — confirm the installed version.
    random_params = jax.tree_map(lambda x: init_weights_r(key, x.shape), params)
    
    def calculate_lyapunov_distribution(params_to_use, model_name):
        """Compute the Lyapunov-exponent distribution for the given parameters.

        Runs paired resumed rollouts — perturbed (epsilon=0.01) vs
        unperturbed (epsilon=0) — then, per environment, fits the slope of
        log(distance) over time, where the distance at each step is the sum
        of the RNN-state gap and the grid-position gap.

        Returns:
            (exponents, histogram counts, bin edges, peak bin left edge).
        """
        print(f"\n=== Calculating Lyapunov distribution for {model_name} ===")
        
        goal_record_1, trajectories_1, obs_record_1, action_record_1, neural_states_1 = resume_traj(last_goals_positions, last_goals_neural_states, last_obs, model, params_to_use, rkey, epsilon = 0.01)
        goal_record_2, trajectories_2, obs_record_2, action_record_2, neural_states_2 = resume_traj(last_goals_positions, last_goals_neural_states, last_obs, model, params_to_use, rkey, epsilon = 0)

        print("shape of neural_states_1: ", neural_states_1.shape)
        print("shape of neural_states_2: ", neural_states_2.shape)

        # Per-step divergence between the perturbed and reference rollouts.
        diff_trajectories = []
        for i in range(neural_states_1.shape[0]):
            # Euclidean distance between the two RNN states at step i
            diff_traj = np.linalg.norm(neural_states_1[i] - neural_states_2[i], axis = 1)
            # Euclidean distance between the two grid positions at step i
            diff_traj_space = np.linalg.norm(trajectories_1[i] - trajectories_2[i], axis = 1)
            diff_trajectories.append(diff_traj+diff_traj_space)
        diff_trajectories = np.array(diff_trajectories)

        # (T, E) -> (E, T): one divergence curve per environment.
        diff_trajectories = diff_trajectories.T

        print("shape of diff_trajectories: ", diff_trajectories.shape)

        def lyapunov_exp(dist):
            """Slope of ln(dist) over time (least-squares linear fit)."""
            # NOTE(review): any zero distance gives -inf and corrupts the
            # fit — confirm the paired rollouts always differ.
            lnr = np.log(dist)
            slope, intercept = np.polyfit(list(range(len(lnr))), lnr, deg=1)
            return slope

        # One Lyapunov exponent per environment's divergence curve.
        lyapunov_exp_list = []
        for i in range(diff_trajectories.shape[0]):
            lyapunov_exp_list.append(lyapunov_exp(diff_trajectories[i]))

        lyapunov_exp_list = np.array(lyapunov_exp_list)

        # 100-bin histogram; the left edge of the tallest bin is reported
        # as the distribution's peak exponent.
        hist, bin_edges = np.histogram(lyapunov_exp_list, bins = 100)
        max_idx = np.argmax(hist)
        peak_value = bin_edges[max_idx]
        
        print(f"{model_name} Lyapunov exponent peak: {peak_value}")
        
        return lyapunov_exp_list, hist, bin_edges, peak_value

    # Lyapunov distribution of the trained model.
    trained_le_list, trained_hist, trained_bins, trained_peak = calculate_lyapunov_distribution(params, "Trained Model")
    
    # Lyapunov distribution of the random baseline.
    random_le_list, random_hist, random_bins, random_peak = calculate_lyapunov_distribution(random_params, "Random Model")

    # Save the histogram data as numpy arrays.
    print("\n=== Saving histogram data ===")
    
    # Trained-model histogram data.
    np.save("./logs/lyapunov_hist_trained_model.npy", trained_hist)
    np.save("./logs/lyapunov_bins_trained_model.npy", trained_bins)
    np.save("./logs/lyapunov_values_trained_model.npy", trained_le_list)
    
    # Random-model histogram data.
    np.save("./logs/lyapunov_hist_random_model.npy", random_hist)
    np.save("./logs/lyapunov_bins_random_model.npy", random_bins)
    np.save("./logs/lyapunov_values_random_model.npy", random_le_list)

    # Comparison plot — two side-by-side panels.
    plt.figure(figsize=(15, 6))
    
    # Trained-model distribution.
    plt.subplot(1, 2, 1)
    plt.hist(trained_le_list, bins = 100, alpha=0.7, color='blue')
    plt.axvline(x = trained_peak, color = 'r', linewidth=2)
    plt.text(trained_peak, 25, f'{trained_peak:.4f}', fontsize = 16, ha='center')
    plt.title('Trained Model Lyapunov Exponent Distribution', fontsize=14)
    plt.xlabel('Lyapunov Exponent')
    plt.ylabel('Frequency')
    
    # Random-model distribution.
    plt.subplot(1, 2, 2)
    plt.hist(random_le_list, bins = 100, alpha=0.7, color='orange')
    plt.axvline(x = random_peak, color = 'r', linewidth=2)
    plt.text(random_peak, 25, f'{random_peak:.4f}', fontsize = 16, ha='center')
    plt.title('Random Model Lyapunov Exponent Distribution', fontsize=14)
    plt.xlabel('Lyapunov Exponent')
    plt.ylabel('Frequency')
    
    plt.tight_layout()
    plt.savefig('./logs/lyapunov_comparison_separate.png', dpi=300, bbox_inches='tight')
    plt.show()

    # Overlay plot — both distributions on one axis.
    plt.figure(figsize=(12, 8))
    
    # Semi-transparent, density-normalized histograms.
    plt.hist(trained_le_list, bins=100, alpha=0.6, color='blue', label='Trained Model', density=True)
    plt.hist(random_le_list, bins=100, alpha=0.6, color='orange', label='Random Model', density=True)
    
    # Peak lines.
    plt.axvline(x=trained_peak, color='blue', linewidth=3, linestyle='--', alpha=0.8, label=f'Trained Peak: {trained_peak:.4f}')
    plt.axvline(x=random_peak, color='orange', linewidth=3, linestyle='--', alpha=0.8, label=f'Random Peak: {random_peak:.4f}')
    
    # Peak value annotations.
    plt.text(trained_peak, plt.ylim()[1]*0.9, f'{trained_peak:.4f}', fontsize=14, ha='center', color='white',
             bbox=dict(boxstyle='round,pad=0.3', facecolor='blue', alpha=0.7))
    plt.text(random_peak, plt.ylim()[1]*0.8, f'{random_peak:.4f}', fontsize=14, ha='center',
             bbox=dict(boxstyle='round,pad=0.3', facecolor='orange', alpha=0.7))
    
    plt.title('Lyapunov Exponent Distribution Comparison\n(Trained vs Random Model)', fontsize=16)
    plt.xlabel('Lyapunov Exponent', fontsize=14)
    plt.ylabel('Probability Density', fontsize=14)
    plt.legend(fontsize=12)
    plt.grid(True, alpha=0.3)
    
    plt.tight_layout()
    plt.savefig('./logs/lyapunov_comparison_overlay.png', dpi=300, bbox_inches='tight')
    plt.show()

    # Generate the same overlay figure as SVG.
    print("\n=== Generating SVG version ===")
    
    # Remember the current backend.
    # NOTE(review): current_backend is never used afterwards — confirm.
    current_backend = plt.get_backend()
    
    # New figure dedicated to the SVG output.
    fig_svg = plt.figure(figsize=(12, 8))
    
    # Identical semi-transparent histograms.
    plt.hist(trained_le_list, bins=100, alpha=0.6, color='blue', label='Trained Model', density=True)
    plt.hist(random_le_list, bins=100, alpha=0.6, color='orange', label='Random Model', density=True)
    
    # Peak lines (identical parameters).
    plt.axvline(x=trained_peak, color='blue', linewidth=3, linestyle='--', alpha=0.8, label=f'Trained Peak: {trained_peak:.4f}')
    plt.axvline(x=random_peak, color='orange', linewidth=3, linestyle='--', alpha=0.8, label=f'Random Peak: {random_peak:.4f}')
    
    # Peak annotations (identical parameters and colors).
    plt.text(trained_peak, plt.ylim()[1]*0.9, f'{trained_peak:.4f}', fontsize=14, ha='center', color='white',
             bbox=dict(boxstyle='round,pad=0.3', facecolor='blue', alpha=0.7))
    plt.text(random_peak, plt.ylim()[1]*0.8, f'{random_peak:.4f}', fontsize=14, ha='center',
             bbox=dict(boxstyle='round,pad=0.3', facecolor='orange', alpha=0.7))
    
    # Identical title and labels.
    plt.title('Lyapunov Exponent Distribution Comparison\n(Trained vs Random Model)', fontsize=16)
    plt.xlabel('Lyapunov Exponent', fontsize=14)
    plt.ylabel('Probability Density', fontsize=14)
    plt.legend(fontsize=12)
    plt.grid(True, alpha=0.3)
    
    plt.tight_layout()
    fig_svg.savefig('./logs/lyapunov_comparison_overlay.svg', format='svg', bbox_inches='tight')
    plt.close(fig_svg)

    print(f"\nResults saved:")
    print(f"- Trained model peak Lyapunov exponent: {trained_peak:.4f}")
    print(f"- Random model peak Lyapunov exponent: {random_peak:.4f}")
    print(f"- Histogram data saved in ./logs/lyapunov_*.npy files")
    print(f"- PNG comparison plot saved as ./logs/lyapunov_comparison_overlay.png")
    print(f"- SVG comparison plot saved as ./logs/lyapunov_comparison_overlay.svg")


if __name__ == "__main__":
    main()
