import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
# 0. Preprocess the weather.csv dataset: keep only the numeric sensor
#    columns by removing the timestamp and the categorical weather label.
df = pd.read_csv('C:/Users/PC/Desktop/weather.csv')
df = df.drop(columns=["Date/Time", "Weather"])
X = df.to_numpy()
print("数据集为：", X)
# Sample output (8784 rows x 6 numeric features — temp, dew point,
# humidity, wind, visibility, pressure):
#  [[ -1.8   -3.9   86.     4.     8.   101.24]
#   ...
#   [  0.    -2.1   86.    30.    11.3   99.89]]


# 1. Center the data: subtract each column's mean so every feature has
#    zero mean.
#    NOTE(review): the original comment called this "标准化"
#    (standardization), but there is no division by the standard
#    deviation — this is mean-centering only, which is the minimum PCA
#    requires. Full standardization would change all downstream numbers.
X_normalized = X - X.mean(axis=0)
print("标准化后的数据集为：", X_normalized)
# Sample output (same shape as X, columns now zero-mean):
#  [[-10.598... -6.455... 18.568... -10.945... -19.664... 0.188...]
#   ...
#   [ -8.798... -4.655... 18.568...  15.054... -16.364... -1.161...]]

# 2. Covariance matrix of the centered data. Transposing puts features
#    in rows, which is np.cov's default layout (equivalent to passing
#    rowvar=False on the untransposed array). Result is 6x6 symmetric.
cov_matrix = np.cov(X_normalized.T)
print("协方差矩阵为：", cov_matrix)
# Sample output: symmetric 6x6 matrix; diagonal holds per-feature
# variances (e.g. ~136.6, 118.4, 286.2, 75.5, 159.3, 0.71).

# 3. Eigendecomposition of the covariance matrix.
#    cov_matrix is symmetric, so the eigenvalues are real in exact
#    arithmetic; np.linalg.eig does NOT return them sorted — the
#    descending ordering is applied in the next step.
#    Columns of `eigenvectors` are the (unit-norm) eigenvectors.
eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
print("协方差矩阵的特征值：",eigenvalues)
print("协方差矩阵的特征向量为：",eigenvectors)

# Sample output — note the eigenvalues are NOT in descending order here:
# 协方差矩阵的特征值： [3.86712842e+02 2.43836325e+02 7.82692038e+01 6.72295839e+01
#  3.04096258e-01 4.82408378e-01]
# 协方差矩阵的特征向量为： [[-0.24634075 -0.67496736 -0.13011414 -0.09345058  0.67372481 -0.06455604]
#  [-0.04392437 -0.69348047 -0.05685412  0.01713668 -0.71023325  0.09590706]
#  [ 0.81100462 -0.237831    0.27217629  0.42768578  0.16907275 -0.01128143]
#  [-0.03011544  0.08146748 -0.79083796  0.60456283  0.00544974  0.03880109]
#  [-0.52790403  0.00237559  0.52810267  0.66501502  0.00234536 -0.01299188]
#  [-0.00829393  0.01725456  0.03795898 -0.01780486  0.11420518  0.99238741]]

# 4.-5. Order the eigenpairs by decreasing eigenvalue: build the
# descending index permutation, then apply it to the eigenvector
# COLUMNS (each column is one eigenvector).
# NOTE(review): `eigenvalues` itself is left in its original order; the
# unsorted values are not used again below, so this has no effect here,
# but reordering them too would be cleaner.
idx = np.argsort(eigenvalues)[::-1]
print("特征值的下标排序(从大到小)为：", idx)
# Sample output: [0 1 2 3 5 4]

eigenvectors = eigenvectors[:, idx]
print(eigenvectors)
# Sample output: same matrix as above with the last two columns swapped,
# so columns now run from largest to smallest eigenvalue.


# 6. Keep only the leading k eigenvector columns — the top-k principal
#    axes. With k = 1 this selects the single direction of greatest
#    variance.
k = 1
eigenvectors = eigenvectors[:, :k]
print("主成分特征向量组成的矩阵为：", eigenvectors)
# Sample output: a 6x1 column vector, e.g.
#  [[-0.24634075] [-0.04392437] [ 0.81100462]
#   [-0.03011544] [-0.52790403] [-0.00829393]]

# 7. Project the centered data onto the retained principal axes:
#    Y = X V, where X is the centered data and V holds the principal
#    eigenvectors as columns. Result: one coordinate per sample per
#    retained component.
X_pca = X_normalized @ eigenvectors
print("降维后的数据为：", X_pca)
# Sample output: an (n_samples, 1) column of projections,
# e.g. [[28.662...] [29.464...] ... [25.625...]]

print("原来的数据个数，维数:", X.shape)
print("现在的数据个数，维数:", X_pca.shape)
# Sample output:
# 原来的数据个数，维数: (8784, 6)
# 现在的数据个数，维数: (8784, 1)



