import json
import os
from traceback import print_tb
from jax import grad
import jax.numpy as jnp
from jax import jit
import time
from matplotlib import pyplot as plt
import numpy as np
import numpy.random as npr
import jax
import jax.numpy as jnp
from jax import device_put
from jax import jit, grad, lax, random
from jax.example_libraries import optimizers
from jax.example_libraries import stax
from jax.example_libraries.stax import Dense, FanOut, Relu, Softplus, Sigmoid, FanInSum
from jax.nn import sigmoid
from functools import partial
from jax import vmap
from flax import linen as nn
from flax.training import train_state
from flax import struct
from jax import lax

from jax import tree_util
from jax.tree_util import tree_structure
from jax.tree_util import tree_flatten, tree_unflatten

import jax.experimental.sparse as sparse

import optax
import cv2

from openTSNE import TSNE
from sklearn.manifold import Isomap

# def load_tasks(pth = "./logs/parallel_task.json"):
#     with open(pth, "r") as f:
#         data = json.load(f)
#         tasks = data["tasks"]
#         landscape = data["landscape"]
#     return tasks, landscape
# rnn_waterfall = np.load("./logs/rnn_state_waterfall1680366946.npy")
# lifetime_trajectory = np.load("./logs/lifetime_trajectory1680366946.npy")
# tasks, landscape = load_tasks(pth = "./logs/parallel_task1680366946.json")
# # pick a random task
# task_id = 3499 #np.random.randint(0, len(tasks))
# print("task_id: ", task_id)

# # take part of the data
# rnn_waterfall = rnn_waterfall[:, task_id, :]
# # get the goal position of the task
# goal_x = tasks[task_id][2]
# goal_y = tasks[task_id][3]
# print("task : ", tasks[task_id])
# # get lifetime trajectory of the task
# task_lifetime_trajectory = lifetime_trajectory[:, task_id, :]

# rnn_waterfall_goal = []
# k = 0
# ks = [i for i in range(rnn_waterfall.shape[0])]
# # for i in task_lifetime_trajectory:
# #     k += 1
# #     if i[0] == goal_x and i[1] == goal_y:
# #         rnn_waterfall_goal.append(rnn_waterfall[k, :])
# #         ks.append(k)
# #         print("goal found: ", k)

# ks = np.array(ks)
# # rnn_waterfall_goal = np.array(rnn_waterfall_goal)
# tsne_embedding = TSNE().fit(rnn_waterfall)
# # visualize the t-SNE embedding, and color the points according to k
# plt.scatter(tsne_embedding[:, 0], tsne_embedding[:, 1], c=ks)
# plt.colorbar()
# plt.show()

# # take part of the data
# rnn_waterfall = rnn_waterfall[-1, :, :]

# print("shape of rnn_waterfall: ", rnn_waterfall.shape)
# step_matrix = np.zeros((rnn_waterfall.shape[0], rnn_waterfall.shape[1]))
# for i in range(rnn_waterfall.shape[0]):
#     for j in range(rnn_waterfall.shape[1]):
#         step_matrix[i, j] = i

# # flatten the data
# rnn_waterfall = rnn_waterfall.reshape(-1, 128)
# step_matrix = step_matrix.reshape(-1, 1)
# print("shape of rnn_waterfall: ", rnn_waterfall.shape)
# print("shape of step_matrix: ", step_matrix.shape)
# tsne_embedding = TSNE().fit(rnn_waterfall)
# # visualize the t-SNE embedding, and color the points according to the step_matrix
# plt.scatter(tsne_embedding[:, 0], tsne_embedding[:, 1])
# plt.colorbar()
# plt.show()


# Candidate log files. Only the last (uncommented) assignment takes effect;
# the earlier variants are kept as switchable alternatives.
# rnn_log_pth = "./data/adaptive_trajectory_optimization/rnn_state_opt_log.npy"
# trj_log_pth = "./data/adaptive_trajectory_optimization/trj_log.json"

# rnn_log_pth = "./data/adaptive_trajectory_optimization/rnn_state_opt_log_32.npy"
# trj_log_pth = "./data/adaptive_trajectory_optimization/trj_log_32.json"

rnn_log_pth = "./data/adaptive_trajectory_optimization/rnn_state_opt_log_128.npy"
trj_log_pth = "./data/adaptive_trajectory_optimization/trj_log_128.json"

# optimized RNN hidden-state log; presumably (n_entries, state_dim) — the code
# below indexes rows and 1-D feature ids, TODO confirm against the writer
rnn_state_opt_log = np.load(rnn_log_pth)

print("shape of rnn_state_opt_log: ", rnn_state_opt_log.shape)

# drop the first entry (presumably the unoptimized initial state — confirm)
rnn_state_opt_log = rnn_state_opt_log[1:]

# perform t-SNE on rnn_state_opt_log (openTSNE's fit returns the 2-D embedding)
tsne_embedding = TSNE().fit(rnn_state_opt_log)
print("shape of tsne_embedding: ", tsne_embedding.shape)
# visualize the t-SNE embedding
plt.scatter(tsne_embedding[:, 0], tsne_embedding[:, 1])
plt.show()

# perform k-means on the RAW RNN states (not the 2-D t-SNE embedding — that
# variant is kept commented below); labels are only displayed on the t-SNE plot
from sklearn.cluster import KMeans
# kmeans = KMeans(n_clusters=4, random_state=0).fit(tsne_embedding)
kmeans = KMeans(n_clusters=4, random_state=0).fit(rnn_state_opt_log)
print("shape of kmeans.labels_: ", kmeans.labels_.shape)
print("kmeans.labels_: ", kmeans.labels_)
# visualize the k-means clustering result, colored by cluster label
plt.scatter(tsne_embedding[:, 0], tsne_embedding[:, 1], c=kmeans.labels_)
plt.show()

# load the trajectory log; the "data" field holds the per-task trajectory lists
with open(trj_log_pth) as f:
    trj_json = json.load(f)
trj_log = trj_json["data"]


print("shape of trj_log: ", len(trj_log))
# print(len(trj_log), len(trj_log[0]), len(trj_log[0][0]))

# render every trajectory of trj_log onto its own small OpenCV canvas
img_seq = []
task_set = []
scale = 4
for trj_idx, trj in enumerate(trj_log):
    # empty trajectory: emit a blank tile and a zero task so indices stay aligned
    if not trj:
        img_seq.append(np.zeros((60, 60, 3), np.uint8))
        task_set.append((0,0))
        print("-----------------------------------------empty trj_log: ", trj_idx)
        continue

    canvas = np.zeros((60, 60, 3), np.uint8)
    # polyline through consecutive trajectory points, downscaled to the tile
    for pt_a, pt_b in zip(trj[:-1], trj[1:]):
        cv2.line(canvas, (int(pt_a[0]/scale), int(pt_a[1]/scale)),
                 (int(pt_b[0]/scale), int(pt_b[1]/scale)), (255, 255, 255), 1)
    # task = displacement from start point to final point (unscaled coordinates)
    start_x, start_y = trj[0][0], trj[0][1]
    goal_x, goal_y = trj[-1][0], trj[-1][1]
    task = (goal_x-start_x, goal_y-start_y)
    task_set.append(task)
    # white dot at the start, red dot at the end, white frame around the tile
    cv2.circle(canvas, (int(start_x/scale), int(start_y/scale)), 2, (255, 255, 255), -1)
    cv2.circle(canvas, (int(goal_x/scale), int(goal_y/scale)), 2, (0, 0, 255), -1)
    cv2.rectangle(canvas, (0, 0), (59, 59), (255, 255, 255), 1)
    img_seq.append(canvas)

n_img = len(img_seq)

# tile the trajectory thumbnails row-major onto one big canvas
n_row = 20
n_col = 20
big_img = np.zeros((n_row*60, n_col*60, 3), np.uint8)
img_id = 0
for cell in range(n_row * n_col):
    row, col = divmod(cell, n_col)
    if img_id < n_img:
        big_img[row*60:(row+1)*60, col*60:(col+1)*60, :] = img_seq[img_id]
    img_id += 1

# untouched copy kept before cluster borders are painted on big_img
big_img_cpy = big_img.copy()

img_id = 0
# per-cluster border colors (BGR), indexed by k-means label
c_table = np.array(
    [
        (255, 0, 0),
        (0, 255, 0),
        (0, 0, 255),
        (255, 255, 0),
        (255, 0, 255),
        (0, 255, 255),
        (255, 120, 53),
        (120, 255, 255),
        (255, 255, 120),
        (120, 0, 120),
    ],
    np.uint8,
)

num_clusters = 10

# hand-picked low-variance ("stationary") state dimensions
# var_clt_mean_less_than_0_1 = [1, 5, 6, 11, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 27, 28, 34, 44, 46, 57, 62, 65, 68, 80, 82, 83, 88, 89, 96, 98, 100, 102, 104, 108, 113, 116, 120, 122]
var_clt_mean_less_than_0_1 = [0, 1, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 25, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 39, 41, 43, 44, 46, 49, 52, 54, 57, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 71, 74, 77, 79, 80, 82, 83, 84, 85, 86, 88, 89, 90, 91, 92, 96, 97, 98, 99, 100, 101, 102, 103, 104, 106, 107, 108, 109, 110, 113, 115, 116, 118, 120, 121, 122, 123, 124, 125, 126]
rnn_population_ = rnn_state_opt_log[0, var_clt_mean_less_than_0_1]
print(rnn_population_.shape)

# running per-cluster sums and member counts over the selected dimensions;
# cluster semantics (from original notes): 0 -> (1,-1), 1 -> (1,1),
# 2 -> (-1,-1), 3 -> (-1,1)
feature_sums = [np.zeros_like(rnn_population_) for _ in range(4)]
feature_counts = [0, 0, 0, 0]

for row, label in enumerate(kmeans.labels_):
    if label < 4:
        feature_sums[label] += rnn_state_opt_log[row, var_clt_mean_less_than_0_1]
        feature_counts[label] += 1

n_rnn_feature_0, n_rnn_feature_1, n_rnn_feature_2, n_rnn_feature_3 = feature_counts

# per-cluster mean feature vectors
rnn_feature_0 = feature_sums[0] / n_rnn_feature_0  # 1, -1
rnn_feature_1 = feature_sums[1] / n_rnn_feature_1  # 1, 1
rnn_feature_2 = feature_sums[2] / n_rnn_feature_2  # -1, -1
rnn_feature_3 = feature_sums[3] / n_rnn_feature_3  # -1, 1

print("rnn_feature_0 = ", rnn_feature_0.tolist())
print("rnn_feature_1 = ", rnn_feature_1.tolist())
print("rnn_feature_2 = ", rnn_feature_2.tolist())
print("rnn_feature_3 = ", rnn_feature_3.tolist())

# # compute the distance of rnn_state_opt_log[83] to each cluster center
# sim_to_feature0 = np.linalg.norm(rnn_state_opt_log[83]-rnn_feature_0)
# sim_to_feature1 = np.linalg.norm(rnn_state_opt_log[83]-rnn_feature_1)
# sim_to_feature2 = np.linalg.norm(rnn_state_opt_log[83]-rnn_feature_2)
# sim_to_feature3 = np.linalg.norm(rnn_state_opt_log[83]-rnn_feature_3)

# print("sim_to_features: ", sim_to_feature0, sim_to_feature1, sim_to_feature2, sim_to_feature3)
# print(trj_log[13][-1][0], trj_log[13][-1][1])

# exit()

# outline each occupied grid cell with the color of its cluster label
for row in range(n_row):
    for col in range(n_col):
        if img_id < n_img:
            label = kmeans.labels_[img_id]
            color = (int(c_table[label, 0]), int(c_table[label, 1]), int(c_table[label, 2]))
            cv2.rectangle(big_img, (col*60+3, row*60+3),
                          ((col+1)*60-3, (row+1)*60-3), color, 3)
        img_id += 1

# show the annotated canvas
cv2.imshow("big_img", big_img)
cv2.waitKey(0)

# visualize the task set: one ray from the image center per task,
# colored by the task's cluster label
task_img = np.zeros((60*scale*3, 60*scale*3, 3), np.uint8)
center_x, center_y = task_img.shape[0]//2, task_img.shape[1]//2
for task_idx, (dx, dy) in enumerate(task_set):
    img_id = task_idx
    label = kmeans.labels_[task_idx]
    cv2.line(task_img, (center_x, center_y), (center_x+dx, center_y+dy),
             (int(c_table[label, 0]), int(c_table[label, 1]), int(c_table[label, 2])), 1)
    # report which task ids fall in cluster 0
    if label == 0:
        print(task_idx)

cv2.imshow("task_img", task_img)
cv2.waitKey(0)

# gather the RNN states assigned to cluster 0, plus their row indices
# (superseded by the per-cluster loop that follows, but kept for parity)
img_ids = [i for i, lab in enumerate(kmeans.labels_) if lab == 0]
rnn_state_opt_log_cluster = np.array([rnn_state_opt_log[i] for i in img_ids])

var_clt = []
rnn_state_opt_log_clusters = []
for clt in range(4):
    # rows belonging to cluster `clt`
    img_ids = [i for i, lab in enumerate(kmeans.labels_) if lab == clt]
    rnn_state_opt_log_cluster = [rnn_state_opt_log[i] for i in img_ids]
    rnn_state_opt_log_clusters.append(rnn_state_opt_log_cluster)

    # per-dimension variance of the states within this cluster
    var_clt.append(np.var(rnn_state_opt_log_cluster, axis=0))
    
# plot each cluster's per-dimension variance
plt.figure()
for c in range(4):
    plt.plot(var_clt[c], label=f'cluster {c}')
plt.legend()
plt.show()

# mean variance across the four clusters, per dimension
var_clt_mean = np.mean(var_clt, axis=0)
print("cross-task variance: ", var_clt_mean)
plt.figure()
plt.title("mean of cross-task variance")
plt.plot(var_clt_mean)
plt.show()

# reorder dimensions by ascending mean variance and replot each cluster
order = np.argsort(var_clt_mean)
var_clt = np.array(var_clt)[:, order]
plt.figure()
for c in range(4):
    plt.plot(var_clt[c], label=f'cluster {c}')
plt.legend()
plt.show()

# sorted mean-variance curve
var_clt_mean_sorted = np.sort(var_clt_mean)
plt.figure()
plt.plot(var_clt_mean_sorted)
plt.title("sorted mean of variance of each dimension of rnn_state_opt_log")
plt.show()

"""
static embedding analysis
"""
figures = []
neural_populations = []
pop_centers = []
for v in range(1, 20):
    var_th = v*0.01
    print("var_th: ", var_th)
    # collect the id of elements in var_clt_mean whose value is less than 0.1
    var_clt_mean_less_than_0_1 = []
    for i in range(len(var_clt_mean)):
        if var_clt_mean[i] < var_th:
            var_clt_mean_less_than_0_1.append(i)

    # print("var_clt_mean_less_than_0_1 = ", var_clt_mean_less_than_0_1)
    neural_populations.append(var_clt_mean_less_than_0_1)

    # extract the corresponding rnn_state_opt_log with id in var_clt_mean_less_than_0_1
    rnn_state_opt_log_stationary = []
    for i in range(len(rnn_state_opt_log)):
        rnn_state_opt_log_stationary.append(rnn_state_opt_log[i][var_clt_mean_less_than_0_1])

    rnn_state_opt_log_stationary = np.array(rnn_state_opt_log_stationary)
    print(rnn_state_opt_log_stationary.shape)

    # perform t-SNE on rnn_state_opt_log_stationary
    tsne_embedding = TSNE().fit(rnn_state_opt_log_stationary)
    kmeans = KMeans(n_clusters=4).fit(rnn_state_opt_log_stationary)
    labels = kmeans.labels_
    centers = kmeans.cluster_centers_
    kmeans = KMeans(n_clusters=4).fit(rnn_state_opt_log_stationary)
    labels = kmeans.labels_
    centers = kmeans.cluster_centers_

    # 将centers中的元素用逗号隔开
    centers_str = np.array2string(centers, separator=', ')
    # print("centers = ", centers_str)
    pop_centers.append(centers_str)
    # # visualize the t-SNE embedding
    # plt.scatter(tsne_embedding[:, 0], tsne_embedding[:, 1])
    # plt.title("var_th = {}, dimensions_selected = {}".format(var_th, rnn_state_opt_log_stationary.shape[1]))
    # plt.show()
    # get the pyplot figure as an opencv image
    fig = plt.gcf()
    fig.canvas.draw()
    img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
    # clear the pyplot figure
    plt.clf()
    figures.append(img)

# Write the sweep results to a log file (created if it does not exist).
# The original redirected fd 1 with os.dup2(f.fileno(), 1), which permanently
# re-pointed process stdout at the log file — after the `with` block closed the
# file, stdout was left pointing at a closed descriptor. Writing to the file
# object directly via print(..., file=f) logs the same lines without touching
# stdout.
with open('./logs/log.txt', 'w') as f:
    for i in range(len(pop_centers)):
        print("pop_centers = ", pop_centers[i], file=f)
        print("neural_populations = ", neural_populations[i], file=f)

# # # concatenate the figures
# # big_img = np.concatenate(figures, axis=1)
# # cv2.imshow("figures", big_img)
# # cv2.waitKey(0)
# # cv2.imwrite("./logs/static_embedding_analysis.png", big_img)
# # save the figures
# for i in range(len(figures)):
#     cv2.imwrite("./logs/static_embedding_analysis_{}.png".format(i), figures[i])


# """
# dynamic embedding analysis
# """
# figures = []
# for v in range(1, 60):
    
#     var_th = v*0.01
#     print("var_th: ", var_th)

#     # collect the id of elements in var_clt_mean whose value is less than 0.1
#     var_clt_mean_less_than_0_1 = []
#     for i in range(len(var_clt_mean)):
#         if var_clt_mean[i] >= var_th:
#             var_clt_mean_less_than_0_1.append(i)
    
#     # # draw a neuron map to show the selected neurons
#     # # each neuron is a white circle with radius 10
#     # # the shape of neuron_img is (w=128*10, h=40, c=3)
#     # neuron_img = np.zeros((40, 128*10, 3), dtype=np.uint8)
#     # for i in range(128):
#     #     cv2.circle(neuron_img, (i*10+5, 20), 4, (255, 255, 255), -1, cv2.LINE_AA)
#     #     if i in var_clt_mean_less_than_0_1:
#     #         cv2.circle(neuron_img, (i*10+5, 20), 4, (0, 0, 255), -1, cv2.LINE_AA)
#     # cv2.imshow("neuron_img", neuron_img)
#     # cv2.waitKey(0)
#     # cv2.imwrite("./logs/neuron_img.png", neuron_img)

#     # extract the corresponding rnn_state_opt_log with id in var_clt_mean_less_than_0_1
#     rnn_state_opt_log_dynamic = []
#     for i in range(len(rnn_state_opt_log)):
#         rnn_state_opt_log_dynamic.append(rnn_state_opt_log[i][var_clt_mean_less_than_0_1])

#     rnn_state_opt_log_dynamic = np.array(rnn_state_opt_log_dynamic)
#     print(rnn_state_opt_log_dynamic.shape)

#     # perform t-SNE on rnn_state_opt_log_dynamic
#     tsne_embedding = TSNE().fit(rnn_state_opt_log_dynamic)

#     # kmeans = KMeans(n_clusters=4, random_state=0).fit(tsne_embedding)
#     # img_id = 0
#     # big_img_cpy2 = big_img.copy()
#     # for i in range(n_row):
#     #     for j in range(n_col):
#     #         if img_id < n_img:
#     #             cv2.rectangle(big_img_cpy2, (j*60+3, i*60+3), ((j+1)*60-3, (i+1)*60-3), 
#     #             (int(c_table[kmeans.labels_[img_id],0]), int(c_table[kmeans.labels_[img_id],1]), int(c_table[kmeans.labels_[img_id],2]))
#     #             , 3)
#     #         img_id += 1
#     # cv2.imshow("big_img_cpy2", big_img_cpy2)
#     # cv2.waitKey(0)

#     # visualize the t-SNE embedding
#     plt.scatter(tsne_embedding[:, 0], tsne_embedding[:, 1])
#     # name the figure var_th
#     plt.title("var_th = {}, dimensions_selected = {}".format(var_th, rnn_state_opt_log_dynamic.shape[1]))
#     # plt.show()
#     # get the pyplot figure as an opencv image
#     fig = plt.gcf()
#     fig.canvas.draw()
#     img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
#     img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
#     img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
#     # clear the pyplot figure
#     plt.clf()
#     figures.append(img)
# # # concatenate the figures
# # big_img = np.concatenate(figures, axis=1)
# # cv2.imshow("figures", big_img)
# # cv2.waitKey(0)
# # cv2.imwrite("./logs/dynamic_embedding_analysis.png", big_img)
# # save the figures
# for i in range(len(figures)):
#     cv2.imwrite("./logs/dynamic_embedding_analysis_{}.png".format(i), figures[i])
