from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Draw an in-place text progress bar on stdout.

    Prints "Progress: [--->   ] NN %" terminated by a carriage return so
    each call overwrites the previous bar on the same terminal line.

    Args:
        current: number of completed steps.
        total: total number of steps.
        barLength: width of the bar in characters.
    """
    done_pct = float(current) * 100 / total
    head = '-' * int(done_pct / 100 * barLength - 1) + '>'
    tail = ' ' * (barLength - len(head))
    print('Progress: [%s%s] %d %%' % (head, tail, done_pct), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """JIT-compiled forward pass of the recurrent model.

    `model` is declared static (static_argnums=(3,)) because the network
    object itself is not a JAX array argument — presumably a Flax-style
    module that is not a pytree, so each distinct model instance triggers
    its own compilation. TODO confirm against the model class.

    Args:
        variables: parameter pytree passed to `model.apply`.
        state: recurrent carry threaded between timesteps.
        x: batched observation input.
        model: the network object (static).

    Returns:
        Whatever `model.apply(variables, state, x)` returns; the caller in
        main() unpacks it as a (new_state, output) pair.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: return the index of the largest logit in `y`."""
    return jnp.argmax(y)
# Batched variant: applies get_action along the leading (batch) axis.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    """Load one task description (landscape, start state, goal) from JSON.

    Args:
        pth: path to the task JSON file.
        display: when True, echo the loaded fields to stdout.

    Returns:
        (landscape, state, goal) exactly as stored under the "data",
        "state" and "goal" keys of the file.
    """
    with open(pth, "r") as fh:
        task = json.load(fh)
    landscape = task["data"]
    state = task["state"]
    goal = task["goal"]
    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)
    return landscape, state, goal

def preprocess(trjs, max_length):
    """Pad (or truncate) a list of trajectories to a fixed length.

    Trajectories shorter than `max_length` are padded by repeating their
    last element. Trajectories longer than `max_length` are truncated —
    the original code kept them unchanged, which produced a ragged list
    that np.array() cannot stack into a rectangular array.

    Args:
        trjs: list of arrays, each of shape (T_i, D).
        max_length: target length for every trajectory.

    Returns:
        np.ndarray of shape (len(trjs), max_length, D).
    """
    processed_trjs = []
    for trj in trjs:
        n = trj.shape[0]
        if n < max_length:
            # Repeat the final point to fill the remaining timesteps.
            pad = np.repeat(trj[-1][np.newaxis, :], max_length - n, axis=0)
            processed_trjs.append(np.concatenate([trj, pad], axis=0))
        else:
            # Truncate so every entry has exactly max_length rows.
            processed_trjs.append(trj[:max_length])
    return np.array(processed_trjs)

def main():
    """Replay a trained recurrent agent over a directory of saved grid
    tasks, record its trajectories, then build two augmented datasets
    (random sub-trajectories and pairwise-concatenated ones) and save
    them to ./logs/random_group.npy and ./logs/contat_group.npy.
    """

    # Defaults; all three are overwritten from argparse below.
    seq_len = 25
    redundancy = 5
    diverse_set_capacity = 5

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--seq_len", type=int, default=8)
    parser.add_argument("--redundancy", type=int, default=3)
    parser.add_argument("--diverse_set_capacity", type=int, default=5)
    parser.add_argument('--gpu_id', type=int, default=0)


    args = parser.parse_args()

    # Copy the parsed CLI values back onto the shared config object.
    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    redundancy = args.redundancy
    seq_len = args.seq_len
    diverse_set_capacity = args.diverse_set_capacity
    # NOTE(review): redundancy, seq_len and diverse_set_capacity are parsed
    # and printed but never used below (only seq_len_ub/seq_len_lb are).
    # Confirm whether they were meant to drive the sampling bounds.

    print("redun: ", redundancy)
    print("seq_len", seq_len)

    # Normalize the network-type string; only "vanilla" and "gru" are known,
    # anything else leaves nn_type as '' (nn_type is not read again below).
    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    """ load task
    """
    # load all tasks in the dir "./data/adaptive_trajectory_optimization/task_envs/" using load_task()
    landscapes, states, goals = [], [], []
    dir_path = "./data/adaptive_trajectory_optimization/task_envs/"
    file_list = os.listdir(dir_path)
    file_count = len(file_list)
    # Assumes the directory contains exactly task_0.json .. task_{N-1}.json
    # with no gaps; any extra file inflates file_count and breaks the loop.
    for tt in range(file_count):
        progress_bar(tt, file_count)
        # get complete path
        task_pth = dir_path + "task_" + str(tt) + ".json"
        landscape, state, goal = load_task(task_pth, display=False)

        landscapes.append(landscape)
        states.append(state)
        goals.append(goal)

    num_envs = len(landscapes)

    states = jnp.array(states)
    goals = jnp.array(goals)

    print("shape of states: ", states.shape)
    print("shape of goals: ", goals.shape)

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # check if param fits the agent
    # (the +10 presumably corresponds to a 10-dim observation concatenated
    # with the hidden state at the Dense_0 input — TODO confirm in grid_agent)
    if rpl_config.nn_type == "vanilla":
        assert params["params"]["Dense_0"]["kernel"].shape[0] == rpl_config.nn_size + 10

    """ create grid env
    """
    start_time = time.time()
    GE = GridEnv(landscapes = landscapes, width = 12, height = 12, num_envs_per_landscape = 1, reward_free=True)
    GE.reset()
    print("time taken to create envs: ", time.time() - start_time)

    # Overwrite the freshly-reset env states/goals with those loaded from
    # disk, then recompute the derived goal-reached flags and observations
    # so the cached fields stay consistent.
    # set states of GE
    GE.batched_states = states.copy()
    # set goals of GE
    GE.batched_goals = goals.copy()
    GE.init_batched_states, GE.init_batched_goals = jnp.copy(GE.batched_states), jnp.copy(GE.batched_goals)
    GE.batched_goal_reached = batch_compute_goal_reached(GE.batched_states, GE.batched_goals)
    GE.last_batched_goal_reached = jnp.copy(GE.batched_goal_reached)
    GE.concat_obs = get_ideal_obs_vmap(GE.batched_envs, GE.batched_states, GE.batched_goals, GE.last_batched_goal_reached)
    concat_obs = GE.concat_obs

    trajectories = []
    goal_reached = []   # NOTE(review): never appended to — dead variable
    obs_record = []
    neural_states = []  # NOTE(review): never appended to — dead variable
    action_record = []

    rnn_state = model.initial_state(GE.num_envs)

    rkey = jax.random.PRNGKey(np.random.randint(0, 1000000))  # NOTE(review): unused below

    # Roll the policy out for life_duration steps across all envs at once.
    for t in range(rpl_config.life_duration):

        progress_bar(t, rpl_config.life_duration)

        """ model forward and step the env
        """
        rnn_state, y1 = model_forward(params, rnn_state, concat_obs, model)
        batched_actions = get_action_vmap(y1)
        batched_goal_reached, concat_obs = GE.step(batched_actions, reset=False)

        # Record the post-step state, the next observation, and the action.
        trajectories.append(np.array(GE.batched_states))
        obs_record.append(np.array(concat_obs))
        action_record.append(np.array(batched_actions))

    trajectories = np.array(trajectories)
    obs_record = np.array(obs_record)
    action_record = np.array(action_record)

    # (time, env, ...) -> (env, time, ...)
    trajectories = np.swapaxes(trajectories, 0, 1)
    obs_record = np.swapaxes(obs_record, 0, 1)
    action_record = np.swapaxes(action_record, 0, 1)

    print("shape of trajectories: ", trajectories.shape)
    print("shape of obs_record: ", obs_record.shape)
    print("shape of action_record: ", action_record.shape)

    seq_len_ub = 25  # upper bound on a sampled sub-trajectory's length
    seq_len_lb = 3   # lower bound

    """ 采样随机轨迹
    1. 从随机起点处，采样一组长度随机的轨迹
    2. 随机挑选两组轨迹，然后将它们拼接起来成为新数据
    """
    # (Translation of the note above: 1. sample sub-trajectories of random
    # length from random start points; 2. randomly pick two groups of
    # trajectories and concatenate them into new data.)
    random_group = []
    contat_group = []  # NOTE(review): "contat" is presumably a typo for
                       # "concat"; the saved filename below uses the same
                       # spelling, so renaming would change the output path.

    for k in range(10):

        # Sample random sub-trajectories.
        sample_n = 1000
        random_trajectories = []
        sample_env_idx = random.sample(range(num_envs), sample_n)
        # NOTE(review): the random.sample arguments here look swapped — this
        # draws trajectories.shape[1] values from range(sample_n), yet the
        # loop below reads sample_trj_start_idx[i] for i in range(sample_n)
        # and uses the values as start positions along the time axis. The
        # likely intent is random.sample(range(trajectories.shape[1]),
        # sample_n); as written it raises IndexError whenever
        # trajectories.shape[1] < sample_n. Confirm before relying on it.
        sample_trj_start_idx = random.sample(range(len(sample_env_idx)), trajectories.shape[1])

        for i in range(sample_n):
            env_idx = sample_env_idx[i]
            trj_start_idx = sample_trj_start_idx[i]
            trj_len = random.randint(seq_len_lb, seq_len_ub)
            # The slice may run past the trajectory end; preprocess() pads
            # any short result back up to seq_len_ub.
            trj = trajectories[env_idx, trj_start_idx:trj_start_idx+trj_len]
            random_trajectories.append(trj)
            # print("len of trj: ", len(trj))

        random_trajectories = preprocess(random_trajectories, seq_len_ub)
        print("shape of random_trajectories: ", random_trajectories.shape)

        # Randomly pick two groups of trajectories and stitch them together
        # into new data.
        group_A_idx = random.sample(range(sample_n), sample_n//2)
        group_B_idx = random.sample(range(sample_n), sample_n//2)

        group_A = random_trajectories[group_A_idx]
        group_B = random_trajectories[group_B_idx]

        group_A_end_point = group_A[:,-1]

        # Translate each group-B trajectory so its first point coincides with
        # the paired group-A trajectory's last point. Fancy indexing above
        # produced copies, so this does not mutate random_trajectories.
        for i in range(group_B.shape[0]):
            group_B[i, :] -= group_B[i, 0]
            group_B[i, :] += group_A_end_point[i]

        new_trajectories = np.concatenate([group_A, group_B], axis=1)

        random_group.append(random_trajectories)
        contat_group.append(new_trajectories)


    random_group = np.concatenate(random_group, axis=0)
    contat_group = np.concatenate(contat_group, axis=0)

    print("shape of random_group: ", random_group.shape)
    print("shape of contat_group: ", contat_group.shape)

    # Randomly pick one stitched sample and plot its path (columns 0/1 are
    # presumably x/y grid coordinates — TODO confirm with GridEnv's state
    # layout). random.randint is inclusive on both ends, hence the -1.
    idx = random.randint(0, contat_group.shape[0]-1)
    trj = contat_group[idx]
    fig, ax = plt.subplots()
    ax.plot(trj[:,0], trj[:,1])
    plt.show()

    np.save("./logs/random_group.npy", random_group)
    np.save("./logs/contat_group.npy", contat_group)

# Script entry point: run the replay/augmentation pipeline only when this
# file is executed directly, not when it is imported.
if __name__ == "__main__":
    main()