import os
from traceback import print_tb
from jax import grad
import jax.numpy as jnp
from jax import jit
import time
import numpy as np
import numpy.random as npr
import jax
import jax.numpy as jnp
from jax import device_put
from jax import jit, grad, lax, random
from jax.example_libraries import optimizers
from jax.example_libraries import stax
from jax.example_libraries.stax import Dense, FanOut, Relu, Softplus, Sigmoid, FanInSum
from jax.nn import sigmoid
from functools import partial
from jax import vmap
from flax import linen as nn
from flax.training import train_state
from flax import struct
from jax import lax

from jax import tree_util
from jax.tree_util import tree_structure
from jax.tree_util import tree_flatten, tree_unflatten

import jax.experimental.sparse as sparse

import optax
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from scipy.integrate import odeint


# Draw two random 128-d vectors and a random 128x128 matrix, entries in [-1, 1).
v1 = np.random.uniform(-1, 1, 128)
v2 = np.random.uniform(-1, 1, 128)
m = np.random.uniform(-1, 1, (128, 128))

# Norm of the difference vector pushed through the matrix: ||m (v1 - v2)||.
diff = v1 - v2
result = m.dot(diff)
result_norm = np.linalg.norm(result)
print(result_norm)

# Compare against det(m) * ||v1 - v2||.  NOTE(review): the determinant is not
# the operator norm, so these two numbers generally disagree — that comparison
# appears to be the point of this scratch experiment.
m_det = np.linalg.det(m)
result_norm = m_det * np.linalg.norm(diff)
print(result_norm)

exit(0)

# # 生成一个长度为 100000 的 600 维随机向量列表
# vectors = [np.random.uniform(-1, 1, 600) for _ in range(100000)]
# vectors = np.array(vectors)

# # 对 vectors 进行 pca
# pca = PCA()
# pca.fit(vectors)
# vectors_pca = pca.transform(vectors)

# # 绘制 pca 的 explained_variance_ratio_
# plt.bar(range(600), pca.explained_variance_ratio_)
# plt.show()

# exit()

# # # Van der Pol方程
# # def vanderpol(X, t):
# #     x, y = X
# #     dxdt = -y + x*(1-x**2-y**2) 
# #     dydt = x + y*(1-x**2-y**2)
# #     return [dxdt, dydt]

# # # 随机初始条件
# # x0, y0 = np.random.uniform(-2, 2, 2)

# # # 求解    
# # t = np.linspace(0, 20, 1000)
# # traj = odeint(vanderpol, [x0, y0], t)

# # # 绘制3D相空间图
# # fig = plt.figure()
# # ax = plt.axes(projection='3d')
# # ax.plot(traj[:,0], traj[:,1], zs=0, zdir='z')
# # ax.set_xlabel('X')
# # ax.set_ylabel('Y')
# # ax.set_zlabel('Z')
# # ax.set_title('Van der Pol Oscillator Phase Portrait')
# # plt.show()

# # Van der Pol方程  
# def vanderpol(X, t):
#     x, y = X
#     dxdt = -y + x*(1-x**2-y**2)
#     dydt = x + y*(1-x**2-y**2)
#     return [dxdt, dydt]

# # 范围空间    
# x = np.linspace(-2, 2, 30)
# y = np.linspace(-2, 2, 30)
# X, Y = np.meshgrid(x, y)

# # 计算向量场
# DX, DY = vanderpol([X, Y], 0)
# M = (np.hypot(DX, DY))  # 向量大小
# M[M == 0] = 1 # 避免除0
# DX /= M  
# DY /= M  

# # 绘制
# fig, ax = plt.subplots()
# ax.streamplot(X, Y, DX, DY, density=1.5, linewidth=0.5, arrowsize=0.5, arrowstyle='->')
# ax.set_xlabel('X')
# ax.set_ylabel('Y')
# ax.set_title('Van der Pol Vector Field')
# plt.show()

# exit()

# # Generate 3D and 100D point clouds
# points_3d = np.random.uniform(-1, 1, size=(1000, 3))
# points_100d = np.random.uniform(-1, 1, size=(1000, 100))

# # Compute all pairwise distances
# dists_3d = np.sqrt(((points_3d[:,None,:] - points_3d[None,:,:]) ** 2).sum(2))
# dists_100d = np.sqrt(((points_100d[:,None,:] - points_100d[None,:,:]) ** 2).sum(2))

# # Plot histograms of distances
# plt.hist(dists_3d.flatten(), alpha=0.5, label='3D', bins=100)
# plt.hist(dists_100d.flatten(), alpha=0.5, label='100D', bins=100)
# plt.legend()
# plt.xlabel('Distance')
# plt.ylabel('Frequency')
# plt.title('Distance Distribution in Low and High Dimensions')
# plt.show()

# # 创建一个长度为10000的128维随机向量列表，向量的每个元素的值在-1到1之间
# vectors = [np.random.uniform(-1, 1, 128) for _ in range(10000)]
# vectors = np.array(vectors)

# # 对 vectors 进行 pca 分析
# pca = PCA(n_components=128)
# pca.fit(vectors)

# # 显示 pca 的variance ratio
# print(pca.explained_variance_ratio_)

# # 将 pca.explained_variance_ratio_ 绘制成柱状图
# plt.bar(range(128), pca.explained_variance_ratio_)
# plt.show()

# Reference vector [0, 1, ..., 99] (float) and a perturbed copy with uniform
# noise drawn from [-20, 20) on every component.
v1 = np.arange(100, dtype=float)
v2 = v1 + np.random.uniform(-20, 20, 100)

print(v1)
print(v2)

# Relative projection error: |<v1, v2> / ||v1||^2 - 1|, i.e. how far the
# projection coefficient of v2 onto v1 is from 1.
proj_err = np.abs(np.dot(v1, v2) / np.linalg.norm(v1) ** 2 - 1)
print("proj: ", proj_err)

# Relative Euclidean distance between the two vectors.
dist_err = np.linalg.norm(v1 - v2) / np.linalg.norm(v1)
print("dist: ", dist_err)

exit()

# Express C (approximately) as a linear combination of A and B using the
# Moore-Penrose pseudo-inverse: coeffs = pinv([A | B]) @ C is the
# least-squares solution of M @ coeffs ≈ C.
# (Commented-out lines kept from the original: the same experiment with
# random 128-d vectors.)
# A = np.random.rand(128)
# B = np.random.rand(128)
# C = np.random.rand(128)

A = np.array([1, 2, 3])
B = np.array([4, 5.1, 6])
C = np.array([5, 7, 9])

# Stack A and B as the columns of the design matrix.
M = np.column_stack((A, B))

# Least-squares coefficients via the pseudo-inverse.
coeffs = np.linalg.pinv(M).dot(C)

# Rebuild C from the fitted coefficients and report the residual.
C_recon = coeffs[0] * A + coeffs[1] * B
print(coeffs)
print(C_recon - C)
exit()

# Random symmetric weight matrix with +/-1 off-diagonal entries and a zero
# diagonal (Hopfield-style couplings).  Size is controlled by `dim`
# (the old comment said "3x3", but the matrix has always been dim x dim).
dim = 10


def random_symmetric_matrix(n=dim):
    """Return an ``n x n`` symmetric matrix with ±1 off-diagonal and 0 diagonal.

    Parameters
    ----------
    n : int, optional
        Matrix size.  Defaults to the module-level ``dim`` (bound at
        definition time), so existing no-argument callers are unchanged.

    Returns
    -------
    numpy.ndarray
        Integer matrix ``A`` with ``A == A.T``, ``A[i, j] in {-1, +1}`` for
        ``i != j`` and ``A[i, i] == 0``.
    """
    A = npr.randn(n, n)
    A = A + A.T                     # symmetrize the Gaussian draw
    A = np.where(A > 0, 1, -1)      # binarize to +/-1 (symmetry preserved elementwise)
    A = A - np.diag(np.diag(A))     # zero the self-connections
    return A

# Asynchronous Hopfield-style dynamics: at each step, recompute the sign of
# every neuron's local field, then copy the new sign into one randomly chosen
# neuron only, printing the state after each update.
random_sym_mat = random_symmetric_matrix()
print(random_sym_mat)

# Random initial +/-1 state vector.
state = np.where(npr.randn(dim) > 0, 1, -1)

for _ in range(1000):
    # Sign of the local field under the current state.
    field_sign = np.sign(random_sym_mat @ state)

    # Asynchronous update: refresh a single randomly selected neuron.
    neuron = npr.randint(0, dim)
    state[neuron] = field_sign[neuron]

    # (Synchronous variant kept from the original, disabled:)
    # state = field_sign

    print(state)