from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

def progress_bar(current, total, barLength = 100):
    """Print an in-place (carriage-return) console progress bar.

    Args:
        current: Number of completed steps.
        total: Total number of steps. A zero/falsy total is treated as
            fully complete instead of raising ZeroDivisionError.
        barLength: Character width of the bar.
    """
    percent = float(current) * 100 / total if total else 100.0
    # Clamp so out-of-range inputs (current < 0 or current > total) can
    # never produce a bar wider than barLength or a negative-length string.
    percent = min(max(percent, 0.0), 100.0)
    arrow = '-' * max(int(percent / 100 * barLength) - 1, 0) + '>'
    spaces = ' ' * (barLength - len(arrow))

    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')
    sys.stdout.flush()

@partial(jax.jit, static_argnums=(3,))
def model_forward(variables, state, x, model):
    """JIT-compiled forward pass of the model.

    Args:
        variables: Parameter pytree passed straight through to ``model.apply``.
        state: Recurrent hidden state fed to ``model.apply``.
        x: Batch of observation inputs.
        model: The network object. Declared static under jit (argnum 3), so
            each distinct model instance triggers its own compilation.

    Returns:
        Whatever ``model.apply(variables, state, x)`` returns; the caller in
        this file unpacks it as a ``(new_state, output)`` pair.
    """
    return model.apply(variables, state, x)

@jit
def get_action(y):
    """Greedy action selection: index of the largest logit in ``y``."""
    best_index = jnp.argmax(y)
    return best_index

# Batched variant: maps get_action over the leading axis of its input.
get_action_vmap = jax.vmap(get_action)

# Read a previously saved task description from disk.
def load_task(pth = "./logs/task.json", display = True):
    """Load the landscape, start state and goal of a task from a JSON file.

    Args:
        pth: Path to the task JSON file.
        display: When True, echo the loaded fields to stdout.

    Returns:
        A ``(landscape, state, goal)`` tuple taken from the file's
        ``data``, ``state`` and ``goal`` keys.
    """
    with open(pth, "r") as f:
        task = json.load(f)

    landscape = task["data"]
    state = task["state"]
    goal = task["goal"]

    if display:
        print("state: ", state)
        print("goal: ", goal)
        print("landscape: ", landscape)

    return landscape, state, goal


def main():

    seq_len = 15
    redundancy = 5
    diverse_set_capacity = 5
    collection_capacity = 20

    """ parse arguments
    """
    rpl_config = ReplayConfig()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_pth", type=str, default=rpl_config.model_pth)
    parser.add_argument("--map_size", type=int, default=rpl_config.map_size)
    parser.add_argument("--task_pth", type=str, default=rpl_config.task_pth)
    parser.add_argument("--log_pth", type=str, default=rpl_config.log_pth)
    parser.add_argument("--nn_size", type=int, default=rpl_config.nn_size)
    parser.add_argument("--nn_type", type=str, default=rpl_config.nn_type)
    parser.add_argument("--show_kf", type=str, default=rpl_config.show_kf)
    parser.add_argument("--visualization", type=str, default=rpl_config.visualization)
    parser.add_argument("--video_output", type=str, default=rpl_config.video_output)
    parser.add_argument("--life_duration", type=int, default=rpl_config.life_duration)
    parser.add_argument("--seq_len", type=int, default=8)
    parser.add_argument("--redundancy", type=int, default=3)
    parser.add_argument("--diverse_set_capacity", type=int, default=5)
    parser.add_argument("--collection_capacity", type=int, default=20)
    parser.add_argument('--gpu_id', type=int, default=0)

    args = parser.parse_args()

    rpl_config.model_pth = args.model_pth
    rpl_config.map_size = args.map_size
    rpl_config.task_pth = args.task_pth
    rpl_config.log_pth = args.log_pth
    rpl_config.nn_size = args.nn_size
    rpl_config.nn_type = args.nn_type
    rpl_config.show_kf = args.show_kf
    rpl_config.visualization = args.visualization
    rpl_config.video_output = args.video_output
    rpl_config.life_duration = args.life_duration
    redundancy = args.redundancy
    seq_len = args.seq_len
    diverse_set_capacity = args.diverse_set_capacity
    collection_capacity = args.collection_capacity

    gpu_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    gpu_id = gpu_list[args.gpu_id]
    os.environ['CUDA_VISIBLE_DEVICES']=gpu_id

    print("redun: ", redundancy)
    print("seq_len", seq_len)

    nn_type = ''
    if rpl_config.nn_type == "vanilla":
        nn_type = "vanilla"
    elif rpl_config.nn_type == "gru":
        nn_type = "gru"

    """ load model
    """
    params = load_weights(rpl_config.model_pth)

    """ create agent
    """
    if rpl_config.nn_type == "vanilla":
        model = RNN(hidden_dims = rpl_config.nn_size)
    elif rpl_config.nn_type == "gru":
        model = GRU(hidden_dims = rpl_config.nn_size)

    # 生成一个二进制串集合，其中每个元素都是一个长度为8的二进制串，要求这些二进制串从 000000000 遍历到 111111111
    binary_set = set()
    for i in range(256):
        binary_string = format(i, '08b')
        binary_set.add(binary_string)
    # 将 binary_set 中的 11111111 元素删除
    binary_set.remove("11111111")
    # 将 binary_set 中所有格式为 "x1x1x1x1" 的元素删除
    binary_set_bk = binary_set.copy()
    for binary in binary_set_bk:
        if binary[1] == '1' and binary[3] == '1' and binary[4] == '1' and binary[6] == '1':
            binary_set.remove(binary)
    print("binary_set: ", binary_set)
    
    # 对 binary_set 进行排序，使得其中的元素从 00000000 遍历到 11111110
    binary_list = sorted(binary_set)
    # 将 binary_list 中的每个元素进行这样的操作：在中间插入一个0，例如 "00000000" 转换为 "000000000"；然后在结尾处插入一个0，例如 "000000000" 转换为 "0000000000"
    binary_list = [i[:4] + "0" + i[4:] + "0" for i in binary_list]
    # 将 binary_list 中的元素转换为整数数组，例如 "00000000" 转换为 [0, 0, 0, 0, 0, 0, 0, 0]
    binary_list = [list(map(int, list(i))) for i in binary_list]
    binary_list = np.array(binary_list)
    print("shape of binary_list: ", binary_list.shape)

    binary_list = np.array(binary_list)

    file_name = "obs_data_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
    obs_file = np.load("./logs/" + file_name)
    obs_data = obs_file["obs_data"]
    diverse_set_trajectoies = obs_file["diverse_set_trajectoies"]
    diverse_set_actions = obs_file["diverse_set_actions"]

    for i in range(diverse_set_trajectoies.shape[0]):
        diverse_set_trajectoies[i] = diverse_set_trajectoies[i] - diverse_set_trajectoies[i][0]

    print("obs_data.shape: ", obs_data.shape)
    print("diverse_set_trajectoies.shape: ", diverse_set_trajectoies.shape)
    print("diverse_set_actions.shape: ", diverse_set_actions.shape)

    # 将 diverse_set_trajectoies 的后两维合并成一维, 例如 (100, 8, 2) 变成 (100, 16)
    diverse_set_trajectoies_2d = diverse_set_trajectoies.reshape(-1, seq_len * 2)
    print("diverse_set_trajectoies.shape: ", diverse_set_trajectoies_2d.shape)

    """ 筛选低歧义性的 obs 序列
    """
    obs_seqs_low_ambiguity = []
    for i in range(obs_data.shape[0]):
        progress_bar(i, obs_data.shape[0])
        obs_seq_low_ambiguity = []
        for j in range(obs_data.shape[1]):
            low_amb = True
            for k in range(obs_data.shape[2]-1):
                if np.sum(np.abs(obs_data[i,j,k] - obs_data[i,j,k+1])) == 0:
                    low_amb = False
                    break
            if low_amb:
                obs_seq_low_ambiguity.append(obs_data[i,j])
        
        obs_seqs_low_ambiguity.append(obs_seq_low_ambiguity)
        # print("obs_seq_low_ambiguity.shape: ", np.array(obs_seq_low_ambiguity).shape)
    

    """ 遍历所有轨迹片段, 收集每一条obs序列的最小 action 误差极限环
        要求最小 action 误差小于等于2
        收集满 collection_capacity 条合规轨迹后结束
    """
    collect_err_th = 3

    n_samples = 100

    # 在 obs_seqs_low_ambiguity 的每种轨迹中选 collection_capacity 个 obs 序列
    obs_seqs_low_ambiguity_selected = []
    for i in range(len(obs_seqs_low_ambiguity)):
        # print("shape of obs_seqs_low_ambiguity[i]: ", np.array(obs_seqs_low_ambiguity[i]).shape)
        # print("shape of obs_data[i]: ", np.array(obs_data[i]).shape)
        obs_seqs_low_ambiguity_selected.append(random.sample(obs_seqs_low_ambiguity[i], collection_capacity))
    obs_seqs_low_ambiguity_selected = np.array(obs_seqs_low_ambiguity_selected)
    print("obs_seqs_low_ambiguity_selected.shape: ", obs_seqs_low_ambiguity_selected.shape)

    obs_records_collection = []
    rnn_limit_ring_collection = []

    for geometry_id in range(obs_seqs_low_ambiguity_selected.shape[0]):

        progress_bar(geometry_id, obs_seqs_low_ambiguity_selected.shape[0])

        rnn_limit_ring_all = []

        obs_records = []

        for obs_seq_id in range(obs_seqs_low_ambiguity_selected.shape[1]):

            obs_record = obs_seqs_low_ambiguity_selected[geometry_id, obs_seq_id]

            obs_seq_binary = []
            for obs in obs_record:
                # 查找 obs 在 binary_list 中的序号
                index_ = np.argwhere(np.all(obs == binary_list, axis=1))
                obs_seq_binary.append(index_[0][0])

            k1 = npr.randint(0, 1000000)
            rnn_state = model.initial_state_rnd(n_samples, k1)

            obs_zero = jnp.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0] for i in range(n_samples)])

            rnn_state_trajectory = []

            random_integers = obs_seq_binary

            random_integers_i = 0
            def update_random_integers_i():
                nonlocal random_integers_i
                nonlocal seq_len
                random_integers_i += 1
                random_integers_i %= seq_len
            
            """ run forced dynamics
            """
            for t in range(rpl_config.life_duration):

                random_obs = binary_list[random_integers[random_integers_i]]
                obs_zero = jnp.array([random_obs for i in range(n_samples)])

                """ model forward 
                """
                rnn_state, y1 = model_forward(params, rnn_state, obs_zero, model)

                rnn_state_trajectory.append(np.array(rnn_state).copy())
                update_random_integers_i()

            # 将 rnn_state_trajectory 的第1、2维交换
            rnn_state_trajectory = np.array(rnn_state_trajectory)
            rnn_state_trajectory = np.swapaxes(rnn_state_trajectory, 0, 1)

            rnn_limit_rings = rnn_state_trajectory[:,-2*seq_len:-1].copy()

            rnn_limit_ring_all.append(rnn_limit_rings.copy())

            obs_records.append(obs_record.copy())

        obs_records = np.array(obs_records)
        print("obs_records.shape: ", obs_records.shape)

        rnn_limit_ring_all = np.array(rnn_limit_ring_all)
        print("rnn_limit_ring_all.shape: ", rnn_limit_ring_all.shape)

        rnn_limit_ring_collection.append(rnn_limit_ring_all.copy())
        obs_records_collection.append(obs_records.copy())

        # 显示 rnn_limit_ring_collection[-1] 所占的内存大小，用Mb表示
        rnn_limit_ring_collection_size = sys.getsizeof(rnn_limit_ring_collection[-1]) / 1024 / 1024
        print("rnn_limit_ring_collection_size: ", rnn_limit_ring_collection_size)


    # 保存 rnn_limit_ring_collection 这个 list 中的所有数据到同一个 npz
    # 创建一个空的字典，用于存储矩阵数据
    data = {}

    # 遍历 rnn_limit_ring_collection 中的矩阵
    for i, matrix in enumerate(rnn_limit_ring_collection):
        # 将每个矩阵存储到字典中的对应键名
        data[f'matrix_{i}'] = matrix

    # 将字典中的数据保存到 npz 文件
    file_name = "./logs/rnn_limit_ring_collection_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
    np.savez(file_name, **data)

    # 保存 obs_records_collection 这个 list 中的所有数据到同一个 npz
    # 创建一个空的字典，用于存储矩阵数据
    data_obs = {}

    # 遍历 obs_records_collection 中的矩阵
    for i, matrix in enumerate(obs_records_collection):
        # 将每个矩阵存储到字典中的对应键名
        data_obs[f'matrix_{i}'] = matrix

    # 将字典中的数据保存到 npz 文件
    file_name = "./logs/obs_records_" + nn_type + "_" + str(seq_len) + "_" + str(redundancy) + "_" + str(diverse_set_capacity) + ".npz"
    np.savez(file_name, **data_obs)
    


# Script entry point.
if __name__ == "__main__":
    main()