# 手写数字识别降维处理

from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier as RFC
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# Load the digit-recognizer dataset (Kaggle MNIST CSV layout:
# column 0 is the label, the remaining 784 columns are pixel values).
data = pd.read_csv('/Users/skyf/MachineLearning/sklearn/localDataSet/digit_recognizor.csv')
# print(data)

x = data.iloc[:, 1:]  # features: all pixel columns
y = data.iloc[:, 0]   # target: the digit label in the first column

# # 寻找最佳的方差贡献率
# pca_line = PCA().fit(x)
#
# print(pca_line.explained_variance_ratio_)
# plt.figure(figsize=[20, 5])
# plt.plot(np.cumsum(pca_line.explained_variance_ratio_))
# plt.xlabel("不同的维度")
# plt.ylabel("方差贡献率")

# 缩小范围
# score = []
# for i in range(1, 101, 10):
#     fit = PCA(i).fit_transform(x)
#     mean = cross_val_score(RFC(n_estimators=10, random_state=0), fit, y, cv=5).mean()
#     score.append(mean)
#
# plt.figure(figsize=[20, 5])
# plt.plot(range(1, 101, 10), score)
# plt.show()

# 继续缩小范围 确定最佳维度
# score = []
# for i in range(10, 31):
#     fit = PCA(i).fit_transform(x)
#     mean = cross_val_score(RFC(n_estimators=10, random_state=0), fit, y, cv=5).mean()
#     score.append(mean)
#
# plt.figure(figsize=[20, 5])
# plt.plot(range(10, 31), score)
# plt.show()


# 使用随机森林查看降维后的效果可以发现，维度低了很多，效果却没差多少。
# Reduce the 784 pixel features to 26 principal components — the best
# dimensionality found by the learning-curve experiments above. With a
# random forest the reduced data scores almost as well as the raw data.
pca = PCA(n_components=26)
result = pca.fit_transform(x)

# meanPCA = cross_val_score(RFC(n_estimators=10, random_state=0), result, y, cv=5).mean()
# mean = cross_val_score(RFC(n_estimators=10, random_state=0), x, y, cv=5).mean()
# print("RFC+PCA:%s" % {meanPCA})
# print("RFC:%s" % {mean})

# 用knn试一试
from sklearn.neighbors import KNeighborsClassifier as KNN

# meanPCA = cross_val_score(KNN(), result, y, cv=5).mean()
# # 原数据还是不要用knn跑了，能玩死你。。。
# # mean = cross_val_score(KNN(), x, y, cv=5).mean()
# print("KNN+PCA:%s" % {meanPCA})
# # print("KNN:%s" % {mean})

# KNN learning curve over n_neighbors on the PCA-reduced data.
# Bug fix: the original loop never used its loop variable, so every
# iteration cross-validated the same default KNN(n_neighbors=5); it also
# re-ran PCA(26).fit_transform on the already-reduced matrix each pass
# (wasted work that clobbered x). Reduce once, then sweep k = 1..10
# (k must be >= 1, so the old range(11) starting at 0 was invalid anyway).
score = []
x_reduced = PCA(26).fit_transform(x)
for k in range(1, 11):
    mean = cross_val_score(KNN(n_neighbors=k), x_reduced, y, cv=5).mean()
    score.append(mean)
plt.figure(figsize=[20, 5])
plt.plot(range(1, 11), score)
plt.show()
