from grid_env_ideal_obs_repeat_task import *
from grid_agent import *
from checkpoint_utils import *
from maze_factory import *
from replay_config import *
import argparse
import json
import sys
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.lines import Line2D
from sklearn.manifold import TSNE
import random
from sklearn.decomposition import PCA
from matplotlib.animation import FuncAnimation
from sklearn.cluster import KMeans
import threading
import mplcursors
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from scipy.spatial.distance import pdist, squareform
from scipy.stats import pearsonr
from matplotlib.widgets import Button
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from scipy.optimize import curve_fit
from scipy.fft import fft2,ifft2
from scipy.fft import fftn, ifftn
from scipy.interpolate import Rbf
import scipy.signal
from scipy.ndimage import gaussian_filter
from ripser import ripser
from persim import plot_diagrams
from scipy.spatial.distance import pdist, squareform

# --- Configuration ---
image_size = 500                          # side length of the square canvas, in pixels
num_nodes = 38                            # nodes per row/column of the hexagonal lattice
sigma = 1                                 # std-dev of the Gaussian blur applied to the node image
peak_value = 10                           # intensity written at each node location
sample_start_x, sample_start_y = 65, 48   # top-left corner of the first sliding-window crop
sample_size = 41                          # side length of each square crop
step = 2                                  # stride between consecutive crop positions
sample_range = 100                        # number of crop positions along each axis

# --- Derived lattice spacing ---
dx = image_size / (num_nodes - 1)         # horizontal node spacing
dy = dx * np.sqrt(3) / 2                  # row spacing of a hexagonal lattice

# Generate the node coordinates of the hexagonal grid: every odd row is
# shifted right by half a horizontal spacing.
xs, ys = [], []
for row in range(num_nodes):
    for col in range(num_nodes):
        xs.append(col * dx if row % 2 == 0 else col * dx + dx / 2)
        ys.append(row * dy)

X = np.array(xs)
Y = np.array(ys)

print(X.shape)
print(Y.shape)

# Rasterize the node coordinates onto a square image.
# NOTE(review): pixels are written as img[x, y], i.e. the x coordinate indexes
# the first (row) axis — this transposes the usual (row=y, col=x) image
# convention, but the crop offsets below index the axes the same way, so the
# script is self-consistent. Confirm before "fixing".
img = np.zeros((image_size, image_size))
for px, py in zip(X, Y):
    ix, iy = int(px), int(py)
    # Only the x side can overflow the canvas (odd rows are shifted right),
    # but guard both axes anyway.
    if ix < image_size and iy < image_size:
        img[ix, iy] = peak_value

# Smooth the point image so every node becomes a Gaussian bump.
img = gaussian_filter(img, sigma=sigma)

# Slide a (sample_size x sample_size) window over the blurred image, starting
# at (sample_start_x, sample_start_y). The window advances `step` pixels at a
# time, visiting sample_range positions along each axis; every position yields
# one crop. (The original comment's 21x21 / step-3 / 15x figures were stale —
# the actual values come from the parameters above.)
imgs = np.array([
    img[sample_start_x + r * step: sample_start_x + r * step + sample_size,
        sample_start_y + c * step: sample_start_y + c * step + sample_size]
    for r in range(sample_range)
    for c in range(sample_range)
])
print("imgs.shape: ", imgs.shape)

# plt.imshow(img, cmap='viridis', interpolation='nearest')
# plt.show()

# for i in range(sample_range):
#     for j in range(sample_range):
#         plt.imshow(imgs[i*15+j], cmap='viridis', interpolation='nearest')
#         plt.show()

# Tile the first grid_size*grid_size crops into one large montage image; each
# crop gets a 3-pixel border of value 1 so the tiles are visually separated.
grid_size = 10
tile = sample_size + 6  # crop size plus a 3-pixel border on every side
img_large = np.zeros((grid_size * tile, grid_size * tile))
for row in range(grid_size):
    for col in range(grid_size):
        bordered = np.pad(imgs[row * grid_size + col], 3, 'constant', constant_values=1)
        img_large[row * tile:(row + 1) * tile, col * tile:(col + 1) * tile] = bordered

plt.imshow(img_large, cmap='viridis', interpolation='nearest')
plt.show()

# Reassemble the crops into a "neural population": flatten each crop to a
# vector, then transpose so that rows index pixels (neurons) and columns index
# window positions (stimuli).
imgs_reshaped = imgs.reshape(imgs.shape[0], -1)
print("imgs_reshaped.shape: ", imgs_reshaped.shape)

neural_population = imgs_reshaped.T
print("neural_population.shape: ", neural_population.shape)

# Run PCA on the neural population and visualize the first three principal
# components as a 3-D scatter plot.
pca = PCA()
pca.fit(neural_population)
neural_population_pca = pca.transform(neural_population)
print("shape of neural_population_pca: ", neural_population_pca.shape)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
top3 = neural_population_pca[:, :3]
ax.scatter(top3[:, 0], top3[:, 1], top3[:, 2])
plt.show()

# Randomly pick 1000 distinct points (columns) from the neural population.
# Fix: np.random.choice samples WITH replacement by default, so the "1000
# points" could contain duplicates; duplicate columns yield zero pairwise
# distances, which would distort the (commented-out) persistence-diagram
# computation below. The population has sample_range**2 = 10000 columns, so
# sampling 1000 without replacement is always valid.
random_indices = np.random.choice(neural_population.shape[1], 1000, replace=False)
neural_population_sample = neural_population[:, random_indices]

# # Compute the persistence diagram of neural_population_sample
# dist_matrix = squareform(pdist(neural_population_sample.T, 'euclidean'))
# dgms = ripser(dist_matrix, maxdim=2, coeff=47,do_cocycles= True, distance_matrix=True)['dgms']
# plot_diagrams(dgms, show=True, lifetime=True)

# # Save dgms to an npz file
# data = {}
# for i, matrix in enumerate(dgms):
#     # store each matrix under its corresponding key name
#     data[f'dgms_{i}'] = matrix

# file_name = "./logs/persistence_diagram_of_synthetic_data.npz"
# np.savez(file_name, **data)
