import torch
import torch.nn as nn
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.stats import pearsonr
from sklearn.decomposition import PCA

# Hyper-parameters.
num_points = 300   # number of points in the cloud
point_dim = 60     # dimensionality of the original point cloud
proj_dim = 18      # dimensionality of the projected space
n_epochs = 10000

# 1. The source point cloud P is a trainable Parameter, initialized uniformly in [-1, 1).
P = nn.Parameter(torch.rand(num_points, point_dim)*2-1)

# 2. Random projection matrix, uniform in [-1, 1).
M = torch.rand(proj_dim, point_dim)*2-1

# 3. Ground-truth target configuration in the projected space, uniform in [-1, 1).
G = torch.rand(num_points, proj_dim)*2-1

# Implement the projection as a bias-free linear layer whose weight is M.
proj = nn.Linear(point_dim, proj_dim, bias=False)
proj.weight = nn.Parameter(M)

criterion = nn.MSELoss()
# NOTE: both the point cloud P and the projection weight are optimized jointly;
# the "diff of M" printout later confirms M changes during training.
optimizer = torch.optim.Adam([P, proj.weight], lr=0.001)

# Snapshot P and M before training (G never changes; snapshot kept for symmetry).
P_initial = P.detach().numpy().copy()
M_initial = proj.weight.detach().numpy().copy()
G_initial = G.detach().numpy().copy()

for epoch in range(n_epochs):
    # Forward pass: project the point cloud into the low-dimensional space
    # and measure how far it is from the target configuration G.
    projected = proj(P)
    loss = criterion(projected, G)

    # Backward pass and joint update of P and the projection weight.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print(f'Epoch: {epoch}, Loss: {loss.item()}')

# Report the final epoch's loss once the loop has finished.
print(f'Epoch: {epoch}, Loss: {loss.item()}')

# Snapshot P and M after training.
P_final = P.detach().numpy().copy()
M_final = proj.weight.detach().numpy().copy()

# Report the shapes of every tracked matrix.
for label, arr in [('P_initial', P_initial), ('M_initial', M_initial),
                   ('G_initial', G_initial), ('P_final', P_final),
                   ('M_final', M_final)]:
    print(f"shape of {label}: ", arr.shape)

# Total absolute change each trainable matrix accumulated during training.
print("diff of P: ", np.abs(P_final - P_initial).sum())
print("diff of M: ", np.abs(M_final - M_initial).sum())

# Pairwise Euclidean distance matrices of P_final and of G_initial.
P_final_dist_mat = squareform(pdist(P_final, metric='euclidean'))
G_initial_dist_mat = squareform(pdist(G_initial, metric='euclidean'))

# Strip the diagonal (self-distance) entries from P_final_dist_mat and G_initial_dist_mat
def del_diag(matrix):
    """Remove the diagonal from a square (n, n) matrix.

    Returns an (n, n-1) array where row i holds row i of `matrix` with
    element [i, i] dropped (columns after the diagonal shift left by one).

    Vectorized replacement for the original O(n^2) Python double loop:
    a boolean off-diagonal mask selects the kept entries row by row, and
    reshaping restores the (n, n-1) layout. Assumes a square input, as
    produced by scipy's squareform.
    """
    n = matrix.shape[0]
    off_diag = ~np.eye(n, dtype=bool)
    return matrix[off_diag].reshape(n, n - 1)

P_final_dist_mat = del_diag(P_final_dist_mat)
G_initial_dist_mat = del_diag(G_initial_dist_mat)

# Min-max normalize both distance matrices into [0, 1].
lo, hi = np.min(P_final_dist_mat), np.max(P_final_dist_mat)
P_final_dist_mat = (P_final_dist_mat - lo) / (hi - lo)
lo, hi = np.min(G_initial_dist_mat), np.max(G_initial_dist_mat)
G_initial_dist_mat = (G_initial_dist_mat - lo) / (hi - lo)

# Flatten both matrices and take the Pearson correlation: how well does the
# trained point cloud's distance structure match the target's?
corr, _ = pearsonr(P_final_dist_mat.ravel(), G_initial_dist_mat.ravel())

print("corr: ", corr)

# Visualize the two distance matrices side by side as images.
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.imshow(P_final_dist_mat, aspect='auto')
plt.title('P_final_dist_mat')
# Annotate the correlation value. The original call used data coordinates
# (100, 2700), which lie far outside the imshow axes (rows span ~0..300),
# so the label was never visible; anchor it to the figure canvas instead.
plt.figtext(0.02, 0.02, "corr: " + str(corr), fontsize=12, color='red')

plt.subplot(1, 2, 2)
plt.imshow(G_initial_dist_mat, aspect='auto')
plt.title('G_initial_dist_mat')

plt.show()

# Compare the effective dimensionality of P before vs. after training via
# the cumulative PCA explained-variance ratio.
pca = PCA()
pca.fit(P_initial)
# np.cumsum gives the running total INCLUDING component i itself. The
# original comprehension `[np.sum(r[:i]) for i in range(0, n)]` was off by
# one: its first entry summed an empty slice (always 0) and the last
# component's contribution was never included.
pca_loading_initial = np.cumsum(pca.explained_variance_ratio_)

pca.fit(P_final)
pca_loading_final = np.cumsum(pca.explained_variance_ratio_)

# Plot the cumulative explained-variance ratios as bar charts.
plt.figure(figsize=(10, 5))
plt.bar(range(P_initial.shape[1]), pca_loading_initial)
plt.show()

plt.figure(figsize=(10, 5))
plt.bar(range(P_initial.shape[1]), pca_loading_final)
plt.show()
