import numpy as np
from sklearn.decomposition import PCA, TruncatedSVD

# Synthetic high-dimensional vector data
def generate_high_dim_vectors(num_points=1000, dimensions=50):
    """Return a (num_points, dimensions) array of uniform random vectors.

    The global NumPy RNG is re-seeded with 42 on every call, so repeated
    calls with the same arguments produce identical arrays.
    """
    np.random.seed(42)
    return np.random.rand(num_points, dimensions)

# Build the dataset once at module level; the PCA and SVD demos below reuse it.
high_dim_vectors = generate_high_dim_vectors(num_points=1000, dimensions=50)

# PCA dimensionality reduction
def apply_pca(data, n_components, random_state=42):
    """Reduce `data` to `n_components` dimensions with PCA.

    Parameters
    ----------
    data : ndarray of shape (n_samples, n_features)
        Input matrix.
    n_components : int
        Target dimensionality; must be <= min(n_samples, n_features).
    random_state : int, default 42
        Seed for sklearn's SVD solver. Added for consistency with
        `apply_svd`: if sklearn auto-selects the randomized solver,
        results stay reproducible.

    Returns
    -------
    tuple of (ndarray, float)
        The (n_samples, n_components) embedding and the total fraction
        of variance retained by the kept components.
    """
    pca = PCA(n_components=n_components, random_state=random_state)
    reduced_data = pca.fit_transform(data)
    # Sum the per-component ratios to get the total retained variance.
    explained_variance = float(np.sum(pca.explained_variance_ratio_))
    return reduced_data, explained_variance

# Reduce the dataset to 10 dimensions with PCA and report the result.
pca_reduced, pca_variance = apply_pca(high_dim_vectors, n_components=10)
print("PCA 降维后数据形状:", pca_reduced.shape)
print("PCA 保留的总方差比例:", pca_variance)

# SVD dimensionality reduction
def apply_svd(data, n_components, random_state=42):
    """Reduce `data` to `n_components` dimensions with truncated SVD.

    Parameters
    ----------
    data : ndarray of shape (n_samples, n_features)
        Input matrix.
    n_components : int
        Target dimensionality; TruncatedSVD requires it to be strictly
        less than n_features.
    random_state : int, default 42
        Seed for the randomized SVD solver. Previously hard-coded to 42;
        exposed as a keyword parameter so callers can vary it, with the
        old value kept as the default for backward compatibility.

    Returns
    -------
    tuple of (ndarray, float)
        The (n_samples, n_components) embedding and the total fraction
        of variance explained by the kept components.
    """
    svd = TruncatedSVD(n_components=n_components, random_state=random_state)
    reduced_data = svd.fit_transform(data)
    # Sum the per-component ratios to get the total explained variance.
    explained_variance = float(np.sum(svd.explained_variance_ratio_))
    return reduced_data, explained_variance

# Reduce the dataset to 10 dimensions with truncated SVD and report the result.
svd_reduced, svd_variance = apply_svd(high_dim_vectors, n_components=10)
print("SVD 降维后数据形状:", svd_reduced.shape)
print("SVD 保留的总方差比例:", svd_variance)

# Compare the PCA and SVD embeddings by the mean Euclidean norm of their rows.
# NOTE(review): PCA centers the data while TruncatedSVD works on the raw
# (non-negative) matrix, so the two mean norms are not directly comparable
# scales — confirm this comparison is intentional.
pca_mean_distance = np.mean(np.linalg.norm(pca_reduced, axis=1))
svd_mean_distance = np.mean(np.linalg.norm(svd_reduced, axis=1))

print("\nPCA 降维后嵌入向量的平均范数:", pca_mean_distance)
print("SVD 降维后嵌入向量的平均范数:", svd_mean_distance)