import numpy as np
from sklearn.decomposition import PCA

def pca(X, k):
    """Project X onto its top-k principal components.

    Args:
        X: array of shape (m_samples, n_features).
        k: number of principal components to keep, 1 <= k <= n_features.

    Returns:
        Array of shape (m_samples, k): the mean-centered data projected
        onto the k eigenvectors of the covariance matrix with the
        largest (absolute) eigenvalues.
    """
    m_samples, n_features = X.shape
    # Center the data so every feature has zero mean.
    mean = np.mean(X, axis=0)
    normX = X - mean
    # Covariance matrix of the features.  rowvar=False treats each column
    # as a variable, so this works for any n_features (the original
    # np.cov(normX[:, 0], normX[:, 1]) only handled exactly 2 features).
    cov_mat = np.cov(normX, rowvar=False)
    # Eigen-decomposition of the (symmetric) covariance matrix.
    vals, vecs = np.linalg.eig(cov_mat)
    # Pair each |eigenvalue| with its eigenvector (column i of vecs) and
    # sort by eigenvalue, descending.  Sorting on the value alone
    # (key=...) avoids comparing numpy arrays when eigenvalues tie,
    # which would raise a ValueError with plain tuple comparison.
    eig_pairs = [(np.abs(vals[i]), vecs[:, i]) for i in range(n_features)]
    eig_pairs.sort(key=lambda pair: pair[0], reverse=True)
    # Stack the top-k eigenvectors as columns: shape (n_features, k).
    mat_k = np.column_stack([eig_pairs[i][1] for i in range(k)])
    # Project (not reconstruct): centered data times the projection matrix.
    return np.dot(normX, mat_k)

def func(data=None):
    """Run sklearn's PCA with 1 component, for comparison with pca().

    Prints the explained variance ratio and the explained variance of
    the fitted model.

    Args:
        data: array of shape (m, n) to transform.  Defaults to the
            module-level X so existing zero-argument callers still work.

    Returns:
        The transformed data, shape (m, 1).
    """
    if data is None:
        # Backward-compatible fallback: the original implicitly read the
        # module-level global X.
        data = X
    p = PCA(n_components=1)
    transformed = p.fit_transform(data)
    print(p.explained_variance_ratio_)
    print(p.explained_variance_)
    return transformed

# Example data: the second feature is exactly 4x the first, so all the
# variance lies along one direction (a single nonzero eigenvalue).
X = np.array([
    [-2, -8],
    [-1, -4],
    [0, 0],
    [1, 4],
    [2, 8]
])
# Alternative example data set (from the classic PCA tutorial):
# X = np.array([
#     [0.69, 0.49],
#     [-1.31, -1.21],
#     [0.39, 0.99],
#     [0.09, 0.29],
#     [1.29, 1.09],
#     [0.49, 0.79],
#     [0.19, -0.31],
#     [-0.81, -0.81],
#     [-0.31, -0.31],
#     [-0.71, -1.01]
# ])

# Guarded entry point: importing this module no longer triggers the
# demo runs as a side effect.
if __name__ == "__main__":
    print(pca(X, 1))
    func()