
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
import time
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from time import time
from sklearn.datasets import load_files
import logging
from sklearn.datasets import fetch_olivetti_faces
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report

def plot_gallery(images, titles, h, w, n_row=2, n_col=5):
    """Plot a gallery of grayscale face images in an n_row x n_col grid.

    Parameters
    ----------
    images : array-like, shape (n_images, h*w)
        Flattened images; each row is reshaped to (h, w) for display.
    titles : sequence of str
        One title per image, shown above each subplot.
    h, w : int
        Pixel height and width of each image.
    n_row, n_col : int, optional
        Grid dimensions; at most n_row * n_col images are displayed.
    """
    plt.figure(figsize=(2 * n_col, 2.2 * n_row), dpi=144)
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.01)
    # Cap at the number of images supplied so a gallery smaller than the
    # grid no longer raises IndexError on images[i].
    n_plots = min(n_row * n_col, len(images))
    for i in range(n_plots):
        plt.subplot(n_row, n_col, i + 1)
        plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
        plt.title(titles[i])
        plt.axis('off')

def title_prefix(prefix, title):
    """Return *title* prefixed as ``"<prefix>: <title>"``."""
    return f"{prefix}: {title}"
        
# Configure logging, then download/load the Olivetti faces dataset into
# the current directory.
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')

data_home = './'
logging.info('开始加载数据集！')
faces = fetch_olivetti_faces(data_home=data_home)
logging.info('加载数据集成功！')

# Flattened pixel matrix and integer subject labels.
X = faces.data
y = faces.target
targets = np.unique(faces.target)
target_names = np.array(["c%d" % t for t in targets])
n_targets = target_names.shape[0]
n_samples, h, w = faces.images.shape
print('样本个数: {}\n类别个数: {}'.format(n_samples, n_targets))
print('图像大小: {}x{}\n数据集形状: {}\n'.format(w, h, X.shape))

n_row = 2
n_col = 6

# Pick one random face per subject, then stack the picks into one array.
picked_rows = []
sample_titles = []
for label in range(n_targets):
    faces_of_label = X[y == label]
    pick = np.random.randint(0, faces_of_label.shape[0], 1)
    picked_rows.append(faces_of_label[pick, :])
    sample_titles.append(target_names[label])
sample_images = np.concatenate(picked_rows, axis=0)

plot_gallery(sample_images, sample_titles, h, w, n_row, n_col)
plt.savefig('face.png')


# Hold out 20% of the samples for final evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=4)

print("计算数据集压缩后保留原数据信息的比例 ...")
candidate_components = range(10, 300, 30)
start = time()
# For each candidate dimensionality, fit a PCA model and record the total
# fraction of variance it retains.
explained_ratios = [
    PCA(n_components=c).fit(X).explained_variance_ratio_.sum()
    for c in candidate_components
]
print('计算完成用时 {0:.2f}秒'.format(time() - start))

# Plot retained-variance ratio versus number of PCA components.
plt.figure(figsize=(10, 6), dpi=144)
plt.grid()
plt.plot(candidate_components, explained_ratios)
plt.xlabel('PCA特征数', fontproperties='SimHei')
plt.ylabel('保留原数据信息的比例', fontproperties='SimHei')
plt.title('PCA保留原数据信息的比例', fontproperties='SimHei')
plt.yticks(np.arange(0.5, 1.05, .05))
plt.xticks(np.arange(0, 300, 20))
plt.savefig('evr.png')

n_row = 1
n_col = 5

# Keep only the first five sample faces for the reconstruction gallery.
sample_images = sample_images[0:5]
sample_titles = sample_titles[0:5]

plotting_images = sample_images
plotting_titles = [title_prefix('orig', t) for t in sample_titles]
candidate_components = [140, 75, 37, 19, 8]
# Compress and reconstruct the sample faces at each dimensionality,
# appending one extra gallery row per candidate component count.
for n_comp in candidate_components:
    print("PCA压缩(n_components={}) ...".format(n_comp))
    start = time()
    reducer = PCA(n_components=n_comp)
    reducer.fit(X)
    reconstructed = reducer.inverse_transform(reducer.transform(sample_images))
    plotting_images = np.concatenate((plotting_images, reconstructed), axis=0)
    row_titles = [title_prefix('{}'.format(n_comp), t) for t in sample_titles]
    plotting_titles = np.concatenate((plotting_titles, row_titles), axis=0)
    print("压缩完成用时 {0:.2f}秒".format(time() - start))

print("使用不同的PCA维数画样例图片 ...")
plot_gallery(plotting_images, plotting_titles, h, w,
             n_row * (len(candidate_components) + 1), n_col)
plt.savefig('diffpic.png')

# Final dimensionality, chosen from the retained-variance scan plotted above.
n_components = 140

print("训练PCA模型 ...")
start = time()
# whiten=True rescales the projected components to unit variance.
pca = PCA(n_components=n_components, svd_solver='randomized', whiten=True).fit(X_train)
# Fixed message: this step trains the model (previously mislabeled "压缩完成").
print("训练完成用时 {0:.2f}秒".format(time() - start))

print("将数据用PCA压缩 ...")
start = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("压缩完成用时 {0:.2f}秒".format(time() - start))


if __name__ == '__main__':
    import sys  # local import: used below for untruncated numpy printing

    print("搜索SVM最佳参数 ...")
    # Grid-search an RBF-kernel SVM over C/gamma on the PCA-reduced
    # training data; class_weight='balanced' compensates for any class
    # imbalance introduced by the split.
    param_grid = {'C': [1, 5, 10, 50, 100],
                  'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01]}
    clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid, verbose=2, n_jobs=4)
    clf = clf.fit(X_train_pca, y_train)
    print("使用GridSearchCV搜索到的最佳参数是:")
    print(clf.best_params_)

    start = time()
    print("预测测试集 ...")
    y_pred = clf.best_estimator_.predict(X_test_pca)
    cm = confusion_matrix(y_test, y_pred, labels=range(n_targets))
    print("预测完成用时 {0:.2f}秒。\n".format(time()-start))
    print("混淆矩阵:")
    # Bug fix: threshold=np.nan raises on NumPy >= 1.14 ("threshold must be
    # non-NAN"); sys.maxsize is the documented way to print the full matrix.
    np.set_printoptions(threshold=sys.maxsize)
    print(cm)

    print(classification_report(y_test, y_pred, target_names=target_names))