import numpy as np
from sklearn import datasets
# Standard scientific Python imports
import matplotlib.pyplot as plt

# Load the scikit-learn handwritten-digits dataset.
digits = datasets.load_digits()

# Flatten each image into a 1-D feature vector: one row per sample,
# one column per pixel (the -1 lets NumPy infer the feature count).
data = digits.images.reshape((len(digits.images), -1))

print("shape of data: {0}".format(data.shape))

def my_PCA(X, r):
    """Principal component analysis via eigendecomposition of the covariance matrix.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Input data; it is centered internally, so it need not be pre-centered.
    r : int
        Number of principal components to keep for the projection.

    Returns
    -------
    X_pca : ndarray of shape (n_samples, r)
        Data projected onto the top-r principal components.
    explained_variance_ratio_ : ndarray of shape (n_features,)
        Fraction of the total variance carried by each component,
        sorted in descending order of variance.
    eigen_vals : ndarray of shape (n_features,)
        Eigenvalues of the covariance matrix, descending.
    eigen_vecs : ndarray of shape (n_features, n_features)
        Matching eigenvectors, one per column (column 0 = top component).
    """
    n, m = X.shape  # (number of samples, number of features)

    # Center the data: subtract the per-feature mean from every sample.
    # Broadcasting replaces the original np.tile copy.
    X = X - np.mean(X, axis=0)

    # Sample covariance matrix (n_features x n_features).
    C = np.dot(X.T, X) / (n - 1)

    # C is symmetric, so use eigh: guaranteed real eigenvalues, returned in
    # ascending order. np.linalg.eig gives NO ordering guarantee and can
    # return spurious tiny complex parts, so slicing its first r columns
    # did not reliably select the top components.
    eigen_vals, eigen_vecs = np.linalg.eigh(C)

    # Re-sort into descending order so column 0 is the largest-variance axis.
    order = np.argsort(eigen_vals)[::-1]
    eigen_vals = eigen_vals[order]
    eigen_vecs = eigen_vecs[:, order]

    # Project the centered data onto the top-r principal directions.
    X_pca = np.dot(X, eigen_vecs[:, 0:r])

    # The eigenvalues of C are already per-component variances; the old
    # extra division by (n - 1) was wrong and only canceled in the ratio.
    explained_variance_ = eigen_vals
    explained_variance_ratio_ = explained_variance_ / explained_variance_.sum()

    return X_pca, explained_variance_ratio_, eigen_vals, eigen_vecs

# Keep the top r = 2 principal components of the flattened digits data.
r = 2
X_pca, explained_variance_ratio_, eigen_vals, eigen_vecs = my_PCA(data, r=r)

# Report the retained components: variance ratios, eigenvalues, eigenvectors.
print("方差的比重：", explained_variance_ratio_[:r])
print("特征值：", eigen_vals[:r])
print("\n 特征向量：", eigen_vecs[:, :r])

# Cumulative fraction of variance explained across all components.
# explained_variance_ratio_ is already an ndarray, so the previous
# np.array(...) wrapper was redundant; dead commented-out sklearn
# comparison code removed.
result = explained_variance_ratio_.cumsum()
print(result)