# PCA (principal component analysis) demo: manual eigendecomposition vs. sklearn
import numpy as np
from numpy import linalg as LA
from sklearn.preprocessing import scale
from sklearn.decomposition import PCA

# Input data: one sample per row, one feature per column.
# (np.array instead of the deprecated np.matrix, so no asarray conversion is needed.)
x = np.array([[1, 3, -7], [2, 5, -14], [-3, -7, 2]])

# Standardize each column to zero mean and unit variance.
# Like sklearn's `scale`, the standard deviation uses the population
# formula (divisor n, not n-1), with the mean as the expectation.
standard_x = (x - x.mean(axis=0)) / x.std(axis=0)
print(f"after standard:\n{standard_x}")

# np.cov treats each ROW as a variable by default, while PCA needs the
# covariance between COLUMNS (features), so pass rowvar=False.
cov_x = np.cov(standard_x, rowvar=False)
print(f"cov_x:\n{cov_x}")

# Eigendecomposition of the covariance matrix.
eigVals, eigVects = LA.eig(cov_x)
print(f"eigVals:\n{eigVals}")
# LA.eig stores eigenvectors as columns; transpose so each prints as a row.
print(f"eigVects:\n{eigVects.T}")

# Locate the dominant eigenvalue and its eigenvector.
# np.argmax replaces the original sentinel-based loop, which would have
# returned index -1 if every eigenvalue were <= -1 (covariance eigenvalues
# are non-negative, but argmax is correct unconditionally).
max_eigVal_index = int(np.argmax(eigVals))
max_eigVal = eigVals[max_eigVal_index]

print(f"max_eigVal:{max_eigVal}")
print(f"max_eigVects:\n{eigVects[:,max_eigVal_index]}")

# Project the standardized data onto the principal eigenvector (1-D PCA).
# NOTE(review): the sign may be flipped relative to sklearn's output below,
# since eigenvectors are only defined up to sign.
result = standard_x.dot(eigVects[:,max_eigVal_index])
print(f"result:\n{result}")

# Cross-check the manual computation against scikit-learn's PCA,
# keeping only the first principal component.
pca = PCA(n_components=1).fit(standard_x)

print(f"sklearn pca特征向量:\n{pca.components_}")
print(f"sklearn pca结果:\n{pca.transform(standard_x)}")

