import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from scipy.stats import pearsonr
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

import jax
import jax.numpy as jnp
from jax import random
from optax import adam


# 1. Generate a clustered point-cloud dataset
num_clusters = 50   # number of clusters
num_points = 10000  # total number of data points
dim = 10            # data dimensionality

# Cluster centers drawn uniformly from [-1, 1]^dim
# (a previous variant used standard-normal centers instead).
cluster_means = np.random.uniform(-1, 1, size=(num_clusters, dim))
cluster_stds = np.random.uniform(0.2, 0.4, size=(num_clusters, dim))  # per-dimension std

# Sample an equal number of axis-aligned Gaussian points per cluster.
points_per_cluster = num_points // num_clusters
data = []
labels = []
for i in range(num_clusters):
    cluster_data = np.random.randn(points_per_cluster, dim) * cluster_stds[i] + cluster_means[i]
    data.append(cluster_data)
    labels.append(np.full(points_per_cluster, i))

data = np.concatenate(data, axis=0)
labels = np.concatenate(labels, axis=0)

print("shape of labels:", labels.shape)

# PCA-project the data to 3 components and visualize.
pca = PCA(n_components=3)
data_pca = pca.fit_transform(data)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(num_clusters):
    # Boolean mask instead of np.where: avoids the 2-D arrays produced by
    # tuple-based fancy indexing.
    mask = labels == i
    ax.scatter(data_pca[mask, 0], data_pca[mask, 1], data_pca[mask, 2], s=20)
# plt.legend() removed: no scatter carries a label, so the original call only
# emitted a "No artists with labels found" warning and drew nothing.
plt.show()

# 2. Build the MLP model
class MLP(nn.Module):
    """Feed-forward classifier that also exposes its penultimate activation.

    ``forward`` returns ``(logits, penultimate_activations)`` where the
    penultimate activation is the input to the final Linear layer — used
    later in the script as the "neural representation" of an input.
    """

    def __init__(self, input_dim, hidden_dims, output_dim):
        super(MLP, self).__init__()
        layers = []
        prev_dim = input_dim
        for hidden_dim in hidden_dims:
            layers.append(nn.Linear(prev_dim, hidden_dim))
            layers.append(nn.ReLU())
            prev_dim = hidden_dim
        layers.append(nn.Linear(prev_dim, output_dim))
        self.model = nn.Sequential(*layers)
        # Index of the layer whose OUTPUT feeds the final Linear layer
        # (the last ReLU when hidden_dims is non-empty).
        self.penultimate_layer = len(layers) - 2

    def forward(self, x):
        # Fallback for hidden_dims == []: the penultimate activation is then
        # the raw input. (The original raised UnboundLocalError in that case
        # because penultimate_layer == -1 never matched any index.)
        penultimate_activations = x
        for i, layer in enumerate(self.model):
            x = layer(x)
            if i == self.penultimate_layer:
                penultimate_activations = x  # activation entering the final layer
        return x, penultimate_activations

# 3. Train the model
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Hidden stack [dim, 5*dim, dim]: widen in the middle, narrow back down.
model = MLP(dim, [dim, dim * 5, dim], num_clusters).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())

# Move the whole dataset onto the training device once, up front.
data = torch.from_numpy(data).float().to(device)
labels = torch.from_numpy(labels).long().to(device)

num_epochs = 1000
batch_size = 64

# Metric histories, appended to at each evaluation (every 10 epochs below).
accs, pearson_rs = [], []

# Helper: drop the diagonal of a square matrix, (n, n) -> (n, n-1).
# Vectorized replacement for the original O(n^2) Python double loop.
def del_diag(matrix):
    n = matrix.shape[0]
    return matrix[~np.eye(n, dtype=bool)].reshape(n, n - 1)

# Loop-invariant quantities for the representation analysis, hoisted out of
# the epoch loop (the original recomputed them at every evaluation):
# pairwise distances between the true cluster centers and between labels.
cluster_means_tensor = torch.from_numpy(cluster_means).float().to(device)
cluster_means_dist = del_diag(
    np.linalg.norm(cluster_means[:, None] - cluster_means[None, :], axis=-1))
cluster_labels = np.arange(num_clusters)
# NOTE(review): cluster_labels_dist is computed and printed but never used in
# the Pearson correlation below — kept for parity with the original script.
cluster_labels_dist = del_diag(np.abs(cluster_labels[:, None] - cluster_labels[None, :]))

for epoch in range(num_epochs):
    # One pass over the dataset in shuffled mini-batches.
    permutation = torch.randperm(num_points)
    for i in range(0, num_points, batch_size):
        indices = permutation[i:i + batch_size]
        batch_x, batch_y = data[indices], labels[indices]

        optimizer.zero_grad()
        outputs, _ = model(batch_x)
        loss = criterion(outputs, batch_y)
        loss.backward()
        optimizer.step()

    # Evaluate every 10 epochs: full-dataset accuracy plus representational
    # similarity between input space and the penultimate-layer space.
    if (epoch + 1) % 10 == 0:
        print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}")
        with torch.no_grad():
            # Full-dataset classification accuracy.
            outputs, _ = model(data)
            _, predicted = torch.max(outputs, 1)
            correct = (predicted == labels).sum().item()
            accuracy = correct / num_points
            print(f"Accuracy: {accuracy:.4f}")

            # Feed the true cluster centers through the network and take the
            # penultimate-layer activations as their neural representations.
            # (The redundant nested torch.no_grad() was removed — we are
            # already inside one.)
            _, reps = model(cluster_means_tensor)
            reps = reps.cpu().numpy()
            print("shape of reps:", reps.shape)

            # Pairwise distances between representations, diagonal removed to
            # match the precomputed center/label distance matrices.
            reps_dist = del_diag(np.linalg.norm(reps[:, None] - reps[None, :], axis=-1))

            print("shape of cluster_means_dist:", cluster_means_dist.shape)
            print("shape of cluster_labels_dist:", cluster_labels_dist.shape)
            print("shape of reps_dist:", reps_dist.shape)

            # Pearson R between input-space and representation-space distance
            # structure (each symmetric pair contributes twice; this does not
            # change the coefficient's trend over training).
            pearson_r, _ = pearsonr(cluster_means_dist.flatten(), reps_dist.flatten())
            print(f"Pearson R correlation coefficient: {pearson_r:.4f}")

            accs.append(accuracy)
            pearson_rs.append(pearson_r)

# Summary curves: accuracy and Pearson R over training, stacked vertically.
fig, (ax1, ax2) = plt.subplots(2, 1)

for panel, series, panel_title in ((ax1, accs, "Accuracy"),
                                   (ax2, pearson_rs, "Pearson R")):
    panel.plot(series)
    panel.set_title(panel_title)

# Widen the vertical gap between the two panels.
plt.subplots_adjust(hspace=0.5)
plt.show()

# Overlay the PCA-projected cluster centers with the representations.
# NOTE(review): reps is plotted in its raw first two dimensions while the
# centers are PCA-projected — the two coordinate systems are not directly
# comparable; presumably intentional, but worth confirming.
pca = PCA(n_components=2)
cluster_means_pca = pca.fit_transform(cluster_means)
plt.scatter(cluster_means_pca[:, 0], cluster_means_pca[:, 1], label="Cluster Means")
plt.scatter(reps[:, 0], reps[:, 1], label="Reps")
plt.legend()
plt.title("PCA of Cluster Means and Reps")
plt.show()

