# -*- coding: utf-8 -*-
"""
Created on Sat Nov 24 22:27:09 2018

@author: Sz-wyz
"""
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier,RadiusNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.feature_selection import SelectKBest
import matplotlib.pyplot as plt
import numpy as np

#加载数据
# Load the Pima Indians diabetes data set.
data = pd.read_csv(r'diabetes.csv')
print('shape of data： {0}'.format(data.shape))

# Split into features (the first 8 columns) and label (column 8),
# then hold out 20% of the rows as a test set.
X = data.iloc[:, :8]
Y = data.iloc[:, 8]
print('shape of X {}; shape of Y {}'.format(X.shape, Y.shape))
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

'''
Build three models: plain k-NN, distance-weighted k-NN, and a
radius-based nearest-neighbors classifier.
'''
models = []
models.append(('KNN', KNeighborsClassifier(n_neighbors=2)))
models.append(('KNN with weights',
               KNeighborsClassifier(n_neighbors=2, weights='distance')))
# RadiusNeighborsClassifier takes no n_neighbors parameter — passing it
# raises TypeError on modern scikit-learn — so only the radius is given.
models.append(('Radius Neighbors', RadiusNeighborsClassifier(radius=500)))

# Fit each model and record its accuracy on the held-out test set.
results = []
for name, model in models:
    model.fit(X_train, Y_train)
    results.append((name, model.score(X_test, Y_test)))
for name, score in results:
    print('name:{},score:{}'.format(name, score))

'''
Because the train/test split is random, compare the algorithms more
fairly: evaluate each model with 10-fold cross-validation and report
the mean accuracy over the folds.
'''
print('\n')
results = []
kfold = KFold(n_splits=10)  # split the data set into 10 folds (loop-invariant, built once)
for name, model in models:
    cv_result = cross_val_score(model, X, Y, cv=kfold)
    results.append((name, cv_result))
for name, scores in results:
    print('name : {} ;cross val score: {}'.format(name, scores.mean()))
    
# The plain k-NN model scored best above, so train it on its own.
print('使用普通knn算法:')
knn = KNeighborsClassifier(n_neighbors=2)
knn.fit(X_train, Y_train)
train_score, test_score = (knn.score(X_train, Y_train),
                           knn.score(X_test, Y_test))
print('train score:{}; test score:{}'.format(train_score, test_score))

# Plot the learning curve.
def plot_learning_curve(estimator,title,X,y,ylim=None,cv=None,
                       n_jobs=1,train_sizes=np.linspace(.1,1.0,5)):
    '''
    Plot a learning curve: training and cross-validation scores as a
    function of the number of training examples.

    Parameters
    ----------
    estimator : object implementing the fit and predict methods

    title : str
        Chart title.

    X : array-like, shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Targets relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Minimum and maximum y-values to plot.

    cv : int, cross-validation generator or iterable, optional
        Cross-validation splitting strategy. Possible inputs:
          - None, to use the default 3-fold cross-validation
          - an integer, to specify the number of folds
          - an object to be used as a cross-validation generator
          - an iterable yielding train/test splits

    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).

    train_sizes : array-like
        Relative (or absolute) sizes of the training subsets.

    Returns
    -------
    The matplotlib.pyplot module, with the curve drawn on the
    current figure.
    '''
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel('Training examples')
    plt.ylabel('Score')

    sizes, scores_train, scores_cv = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    mean_train = np.mean(scores_train, axis=1)
    std_train = np.std(scores_train, axis=1)
    mean_cv = np.mean(scores_cv, axis=1)
    std_cv = np.std(scores_cv, axis=1)

    plt.grid()

    # Shade a one-standard-deviation band around each mean curve.
    plt.fill_between(sizes, mean_train - std_train, mean_train + std_train,
                     alpha=0.1, color='r')
    plt.fill_between(sizes, mean_cv - std_cv, mean_cv + std_cv,
                     alpha=0.1, color='g')
    # Mean accuracy curves.
    plt.plot(sizes, mean_train, 'o-', color='r', label='Training score')
    plt.plot(sizes, mean_cv, 'o-', color='g', label='Cross-validation score')

    plt.legend(loc=0)
    return plt

# Draw the learning curve using 10 shuffled 80/20 splits.
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
plt.figure(figsize=(10, 6), dpi=80)

print('\n学习曲线：')
plot_learning_curve(knn, "Learn Curve for KNN Diabetes",
                    X, Y, ylim=(0, 1.01), cv=cv)
plt.show()
   
'''
Test-set accuracy is not high; reduce the number of features by
keeping only the two most label-correlated ones.
'''
selector = SelectKBest(k=2)
X_new = selector.fit_transform(X, Y)

# Visualize the two selected features.
plt.figure(figsize=(10, 6), dpi=80)
plt.ylabel('BMI')
plt.xlabel('Glucose')
negative = X_new[Y == 0]
positive = X_new[Y == 1]
# Negative (Y == 0) samples drawn as circles.
plt.scatter(negative[:, 0], negative[:, 1], c='r', s=20, marker='o')
# Positive (Y == 1) samples drawn as triangles.
plt.scatter(positive[:, 0], positive[:, 1], c='g', s=20, marker='^')
'''
The positive and negative samples overlap almost completely, so the
k-NN algorithm cannot reach a high prediction accuracy on this data.
'''
