from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Draw an in-place textual progress bar on stdout.

    Rewrites the current terminal line (carriage return, no newline) with
    something like ``Progress: [---->     ] 50 %``.

    Args:
        current: number of completed steps.
        total: total number of steps.
        barLength: width of the bar body in characters.
    """
    fraction = float(current) * 100 / total
    # Shrink the shaft by one so the '>' head still fits inside the bar.
    shaft_len = int(fraction / 100 * barLength - 1)
    arrow = '-' * shaft_len + '>'
    padding = ' ' * (barLength - len(arrow))
    print('Progress: [%s%s] %d %%' % (arrow, padding, fraction), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """Forward pass of the model (JIT-compiled).

    Thin wrapper around ``model.apply``. ``model`` is declared a static
    argument so jax traces/compiles once per distinct model object rather
    than treating it as a traced value (it must therefore be hashable).

    Args:
        variables: parameter pytree forwarded to ``model.apply``.
        state: recurrent hidden-state carry (batched).
        x: batched observation input.
        model: the network object (static argument).

    Returns:
        Whatever ``model.apply`` returns; callers in this file unpack it
        as ``(new_state, output)``.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest entry of ``y``."""
    best_index = jnp.argmax(y)
    return best_index

# Batched variant: maps get_action over the leading (sample) axis.
get_action_vmap = jax.vmap(get_action)

# load landscape and states from file
def load_task(pth = "./logs/task.json", display = True):
    # open json file
    with open(pth, "r") as f:
        data = json.load(f)
        landscape = data["data"]
        state = data["state"]
        goal = data["goal"]
        if display:
            print("state: ", state)
            print("goal: ", goal)
            print("landscape: ", landscape)
    return landscape, state, goal


def main():

    seq_len = 15
    redundancy = 5
    diverse_set_capacity = 5
    collection_capacity = 20

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--seq_len", type=int, default=8)
    parser.add_argument("--redundancy", type=int, default=3)
    parser.add_argument("--diverse_set_capacity", type=int, default=5)
    parser.add_argument("--collection_capacity", type=int, default=20)
    parser.add_argument('--gpu_id', type=int, default=0)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    redundancy = args.redundancy
    seq_len = args.seq_len
    diverse_set_capacity = args.diverse_set_capacity
    collection_capacity = args.collection_capacity

    gpu_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    gpu_id = gpu_list[args.gpu_id]
    os.environ['CUDA_VISIBLE_DEVICES']=gpu_id

    print("redun: ", redundancy)
    print("seq_len", seq_len)

    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    file_name = "obs_data_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
    obs_file = np.load("./logs/" + file_name)
    obs_data = obs_file["obs_data"]
    diverse_set_trajectoies = obs_file["diverse_set_trajectoies"]
    diverse_set_actions = obs_file["diverse_set_actions"]

    for i in range(diverse_set_trajectoies.shape[0]):
        diverse_set_trajectoies[i] = diverse_set_trajectoies[i] - diverse_set_trajectoies[i][0]

    print("obs_data.shape: ", obs_data.shape)
    print("diverse_set_trajectoies.shape: ", diverse_set_trajectoies.shape)
    print("diverse_set_actions.shape: ", diverse_set_actions.shape)

    # 将 diverse_set_trajectoies 的后两维合并成一维, 例如 (100, 8, 2) 变成 (100, 16)
    diverse_set_trajectoies_2d = diverse_set_trajectoies.reshape(-1, seq_len * 2)
    print("diverse_set_trajectoies.shape: ", diverse_set_trajectoies_2d.shape)

    """ 筛选低歧义性的 obs 序列
    """
    obs_seqs_low_ambiguity = []
    print("---------------- generating low ambiguity obs sequences...")
    for i in range(obs_data.shape[0]):
        progress_bar(i, obs_data.shape[0])
        obs_seq_low_ambiguity = []
        for j in range(obs_data.shape[1]):
            low_amb = True
            for k in range(obs_data.shape[2]-1):
                if np.sum(np.abs(obs_data[i,j,k] - obs_data[i,j,k+1])) == 0:
                    low_amb = False
                    break
            if low_amb:
                obs_seq_low_ambiguity.append(obs_data[i,j])
        
        obs_seqs_low_ambiguity.append(obs_seq_low_ambiguity)
        # print("obs_seq_low_ambiguity.shape: ", np.array(obs_seq_low_ambiguity).shape)

    """ 遍历所有轨迹片段, 收集每一条obs序列的最小 action 误差极限环
        要求最小 action 误差小于等于2
        收集满 collection_capacity 条合规轨迹后结束
    """
    collect_err_th = 3

    n_samples = 1000

    # 在 obs_seqs_low_ambiguity 的每种轨迹中选 collection_capacity 个 obs 序列
    obs_seqs_low_ambiguity_selected = []
    for i in range(len(obs_seqs_low_ambiguity)):
        # print("shape of obs_seqs_low_ambiguity[i]: ", np.array(obs_seqs_low_ambiguity[i]).shape)
        # print("collection_capacity ", collection_capacity)
        obs_seqs_low_ambiguity_selected.append(random.sample(obs_seqs_low_ambiguity[i], collection_capacity))
    obs_seqs_low_ambiguity_selected = np.array(obs_seqs_low_ambiguity_selected)
    print("obs_seqs_low_ambiguity_selected.shape: ", obs_seqs_low_ambiguity_selected.shape)

    rnn_limit_ring_of_best_estimation_collection = []
    obs_records_collection = []

    for geometry_id in range(obs_seqs_low_ambiguity_selected.shape[0]):

        progress_bar(geometry_id, obs_seqs_low_ambiguity_selected.shape[0])

        print("obs_seqs_low_ambiguity_selected.shape[0] : ", obs_seqs_low_ambiguity_selected.shape[0])
        print("obs_seqs_low_ambiguity_selected.shape[1] : ", obs_seqs_low_ambiguity_selected.shape[1])

        rnn_limit_ring_of_best_estimation = []

        obs_records = []

        for obs_seq_id in range(obs_seqs_low_ambiguity_selected.shape[1]):

            obs_record = obs_seqs_low_ambiguity_selected[geometry_id, obs_seq_id]
            # obs_records.append(obs_record)
            action_slice = diverse_set_actions[geometry_id]

            k1 = npr.randint(0, 1000000)
            rnn_state = model.initial_state_rnd(n_samples, k1)

            batched_periodic_obs = jnp.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0] for i in range(n_samples)])

            rnn_state_trajectory = []

            random_integers_i = 0
            def update_random_integers_i():
                nonlocal random_integers_i
                nonlocal seq_len
                random_integers_i += 1
                random_integers_i %= seq_len
            
            actionlets = []
            actionlet = []

            """ run forced dynamics
            """
            for t in range(rpl_config.life_duration):

                periodic_obs = obs_record[random_integers_i]
                batched_periodic_obs = jnp.array([periodic_obs for i in range(n_samples)])

                """ model forward 
                """
                rnn_state, y1 = model_forward(params, rnn_state, batched_periodic_obs, model)
                actions = get_action_vmap(y1)

                rnn_state_trajectory.append(np.array(rnn_state).copy())

                update_random_integers_i()
                actionlet.append(actions.copy())
                if random_integers_i == seq_len-1:
                    actionlets.append(actionlet.copy())
                    actionlet.clear()

            actionlets = actionlets[1:-2]
            actionlets = np.array(actionlets)
            # 将 actionlets 的第2维和第3维交换
            actionlets = np.swapaxes(actionlets, 1, 2)
            # print("shape of actionlets: ", actionlets.shape)

            # 取最后2个周期的 actionlets 作为极限环
            actionlets_limit_cycle = actionlets[-2]
            # print("shape of actionlets_limit_cycles: ", actionlets_limit_cycle.shape)

            # 将 rnn_state_trajectory 的第1、2维交换
            rnn_state_trajectory = np.array(rnn_state_trajectory)
            rnn_state_trajectory = np.swapaxes(rnn_state_trajectory, 0, 1)

            print("rnn_state_trajectory.shape: ", rnn_state_trajectory.shape)

            """ 逐个检测 action 序列和原始数据的相似度
            """
            diff = []

            for i in range(actionlets_limit_cycle.shape[0]):
                actionlet = actionlets_limit_cycle[i]

                # 计算 actionlet 和 action_slice 中不同元素的个数
                diff_n_elements = np.sum(actionlet != action_slice)

                diff.append(diff_n_elements)
            diff = np.array(diff)
            # print("min diff of set1: ", np.min(diff))

            min_diff = np.min(diff)
            minimum_index = np.argmin(diff)

            # print("min diff: ", min_diff)
            # print("minimum_index: ", minimum_index)

            if min_diff <= collect_err_th:
                rnn_limit_ring_of_best_estimation.append(rnn_state_trajectory[minimum_index,-2*seq_len:-1].copy())
                obs_records.append(obs_record.copy())

                # # 顯示 rnn_limit_ring_of_best_estimation 最後一個元素所佔的內存大小, 以GB為單位
                # print("rnn_limit_ring_of_best_estimation[-1].nbytes: ", rnn_limit_ring_of_best_estimation[-1].nbytes / 1024 / 1024 / 1024)
                # print("shape of rnn_limit_ring_of_best_estimation[-1]: ", rnn_limit_ring_of_best_estimation[-1].shape)

        rnn_limit_ring_of_best_estimation = np.array(rnn_limit_ring_of_best_estimation)
        print("rnn_limit_ring_of_best_estimation.shape: ", rnn_limit_ring_of_best_estimation.shape)

        obs_records = np.array(obs_records)
        print("obs_records.shape: ", obs_records.shape)

        rnn_limit_ring_of_best_estimation_collection.append(rnn_limit_ring_of_best_estimation.copy())
        obs_records_collection.append(obs_records.copy())


    # 保存 rnn_limit_ring_of_best_estimation_collection 这个 list 中的所有数据到同一个 npz
    # 创建一个空的字典，用于存储矩阵数据
    data = {}

    # 遍历 rnn_limit_ring_of_best_estimation_collection 中的矩阵
    for i, matrix in enumerate(rnn_limit_ring_of_best_estimation_collection):
        # 将每个矩阵存储到字典中的对应键名
        data[f'matrix_{i}'] = matrix

    # 将字典中的数据保存到 npz 文件
    file_name = "./logs/rnn_limit_rings_of_best_estimation_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
    np.savez(file_name, **data)

    # 保存 obs_records_collection 这个 list 中的所有数据到同一个 npz
    # 创建一个空的字典，用于存储矩阵数据
    data_obs = {}

    # 遍历 obs_records_collection 中的矩阵
    for i, matrix in enumerate(obs_records_collection):
        # 将每个矩阵存储到字典中的对应键名
        data_obs[f'matrix_{i}'] = matrix

    # 将字典中的数据保存到 npz 文件
    file_name = "./logs/obs_records_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
    np.savez(file_name, **data_obs)
    


# Script entry point: run the replay/limit-cycle harvesting pipeline.
if __name__ == "__main__":
    main()