# -*- coding: utf-8 -*-
"""
交叉验证
Created on Mon Apr  9 17:11:36 2018

@author: Allen
"""
'''
问题引入：
    对数据集分为训练数据集和测试数据集的划分，问题在于针对了特定的数据集过拟合
说白了，就是模型在围绕测试数据集在打转。
'''
'''
解决方法：
    在训练数据集中，再分出来一个验证数据集，使用验证数据集来优化模型，
使用测试数据集来测试模型.
    交叉验证，每一个数据都有机会成为验证数据集，这样得到的超参数更加可靠。
    留一法（Leave-One-Out），训练数据集为n-1，好处是完全避免了随机带来的影响，
        坏处是运算量极大。
'''
import numpy as np
from sklearn import datasets

# Load the handwritten-digits dataset: features in X, labels in y.
digits = datasets.load_digits()
X, y = digits.data, digits.target

# Hold out 40% as a test set; a fixed random_state keeps the split reproducible.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=666)

# 使用KNN算法
from sklearn.neighbors import KNeighborsClassifier
# 测试超参数

# Naive hyper-parameter search: each (k, p) combination is scored directly
# on the TEST set, so the chosen model is tuned to that particular split.
best_scores, best_k, best_p = 0, 0, 0
for neighbors in range(2, 11):        # candidate n_neighbors values
    for power in range(1, 6):         # candidate Minkowski exponents
        classifier = KNeighborsClassifier(n_neighbors=neighbors, p=power, weights="distance")
        classifier.fit(X_train, y_train)
        current = classifier.score(X_test, y_test)
        # strict ">" keeps the first combination on ties, like the original search
        if current > best_scores:
            best_scores, best_k, best_p = current, neighbors, power

print("best k = ", best_k)
print("best p = ", best_p)
print("best scores = ", best_scores)

'''输出
k =  3
p =  4
scores =  0.986091794159
'''

# 使用交叉验证


from sklearn.model_selection import cross_val_score

# cross_val_score(estimator, X, y): the third argument is the TRAINING labels,
# not a test set — the function splits X_train/y_train into folds itself.
# The cv parameter controls the number of folds (e.g. cv=3 for three).
knn_clf = KNeighborsClassifier()
print(cross_val_score(knn_clf, X_train, y_train))
# 参数 cv = 3， 分成三份
'''
输出： [ 0.98895028  0.97777778  0.96629213]
'''


# Cross-validated hyper-parameter search: each (k, p) combination is judged by
# the mean of its fold scores on the training set only, so the test set stays
# untouched until the final evaluation.
best_scores, best_k, best_p = 0, 0, 0
for neighbors in range(2, 11):
    for power in range(1, 6):
        knn_clf = KNeighborsClassifier(n_neighbors=neighbors, p=power, weights="distance")
        fold_scores = cross_val_score(knn_clf, X_train, y_train)
        mean_score = np.mean(fold_scores)
        # strict ">" keeps the first combination on ties, like the original search
        if mean_score > best_scores:
            best_scores, best_k, best_p = mean_score, neighbors, power

print("best k = ", best_k)
print("best p = ", best_p)
print("best scores = ", best_scores)


'''输出
best k =  2
best p =  2
best scores =  0.982359987401
总结：
    这次使用了交叉验证，理论上来说要比第一种可靠性更高。
'''


# Re-train a classifier with the hyper-parameters chosen via cross-validation
# and evaluate it exactly once on the held-out test set (prints ~0.9805).
best_knn_clf = KNeighborsClassifier(n_neighbors=2, p=2, weights="distance")
best_knn_clf.fit(X_train, y_train)
print(best_knn_clf.score(X_test, y_test))


'''
使用交叉验证的方式找到了最佳的超参数，对于0.98的预测结果是可以信赖的。
'''

## Grid search
from sklearn.model_selection import GridSearchCV  # "CV" stands for cross-validation

# Hyper-parameter grid to explore; GridSearchCV cross-validates every combination.
param_grid = [
    {
        "weights": ["distance"],
        "n_neighbors": list(range(2, 11)),
        "p": list(range(1, 6)),
    },
]

grid_search = GridSearchCV(knn_clf, param_grid, verbose=1)
grid_search.fit(X_train, y_train)

print(grid_search.best_score_)      # e.g. 0.982374768089
print(grid_search.best_params_)     # e.g. {'n_neighbors': 2, 'p': 2, 'weights': 'distance'}
# best_estimator_ is a fitted KNeighborsClassifier, e.g.
# KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
#            metric_params=None, n_jobs=1, n_neighbors=2, p=2,
#            weights='distance')
print(grid_search.best_estimator_)

best_knn_clf = grid_search.best_estimator_
print(best_knn_clf.score(X_test, y_test))  # e.g. 0.980528511822
# Summary: grid search already performs cross-validation internally.
