import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler

# Manual PCA demo: generate 2-D blob data, project onto the first
# principal component, and visualize original data vs. the 1-D projection.

# 1) Generate a two-dimensional toy dataset: two Gaussian blobs.
#    `labels` holds each sample's cluster id (0 or 1), used to color the
#    projected points later. (Was discarded into `_` and then reused as
#    `c=_` in the scatter call — fragile and unreadable.)
X, labels = make_blobs(n_samples=100, centers=2, n_features=2, random_state=42)

# Standardize to zero mean / unit variance so both features contribute equally.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# 2) Covariance matrix of the standardized data
#    (rowvar=False: rows are samples, columns are features).
cov_matrix = np.cov(X_scaled, rowvar=False)
print("Covariance Matrix:\n", cov_matrix)

# 3) Eigendecomposition. Use eigh, not eig: the covariance matrix is
#    symmetric, so eigh guarantees real eigenvalues/eigenvectors and avoids
#    spurious complex parts from floating-point noise. (Eigenvector sign is
#    arbitrary either way, so the projection may be mirrored vs. eig.)
eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)
print("Eigenvalues:\n", eigenvalues)
print("Eigenvectors:\n", eigenvectors)

# 4) Select the principal component with the largest eigenvalue.
#    eigh returns eigenvalues in ascending order, but we sort explicitly so
#    the selection is independent of the decomposition routine's ordering.
sorted_indices = np.argsort(eigenvalues)[::-1]
top_eigenvector = eigenvectors[:, sorted_indices[0]]
print("Top Eigenvector:\n", top_eigenvector)

# 5) Project the 2-D data onto the first principal component -> 1-D array.
X_pca = np.dot(X_scaled, top_eigenvector)

# 6) Visualize original data (left) and the PCA projection (right).
plt.figure(figsize=(12, 6))

# Left panel: the standardized 2-D data.
plt.subplot(1, 2, 1)
plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c='blue', label='Original Data')
plt.title('Original Data')
plt.xlabel('Feature 1')
plt.ylabel('Feature 2')
plt.legend()

# Right panel: projected points laid out on the line y=0, colored by their
# true cluster label to show that the 1-D projection separates the blobs.
plt.subplot(1, 2, 2)
plt.scatter(X_pca, np.zeros_like(X_pca), c=labels, edgecolor='k', marker='o',
            s=50, label='PCA Projection')
plt.title('PCA Projection')
plt.xlabel('First Principal Component')
plt.ylabel(' ')
plt.legend()

# Draw the first principal-component direction as an arrow from the origin
# (drawn on the current axes, i.e. the right panel).
plt.arrow(0, 0, top_eigenvector[0], top_eigenvector[1],
          head_width=0.1, head_length=0.1, fc='green', ec='green')
plt.text(top_eigenvector[0], top_eigenvector[1], 'First PC',
         fontsize=12, ha='right')

plt.tight_layout()
plt.show()
