# Load the data
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split, learning_curve
from sklearn.svm import SVC
import numpy as np
import matplotlib.pyplot as plt

# Load the breast-cancer dataset and report its size and class balance.
cancer = load_breast_cancer()
X, y = cancer.data, cancer.target
n_positive = y[y == 1].shape[0]
n_negative = y[y == 0].shape[0]
print('data shape:{0}; no.positive:{1}; no.negative:{2}\n'.format(
        X.shape, n_positive, n_negative))

# Hold out 20% of the samples as a test set.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Baseline: RBF-kernel SVM; compare accuracy on the train and test splits.
clf = SVC(C=1.0, kernel='rbf', gamma=0.1)
clf.fit(X_train, y_train)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
print('train score:{0}, test score:{1}\n'.format(train_score, test_score))

from sklearn.model_selection import GridSearchCV

# Grid-search the RBF gamma with 5-fold cross-validation.
# NOTE: the original grid started at 0, which is not a valid gamma for SVC
# (gamma must be strictly positive), so the range starts just above zero.
gammas = np.linspace(0.0001, 0.0003, 30)
param_grid = {'gamma': gammas}  # fixed typo: was 'parma_grid'
clf = GridSearchCV(SVC(), param_grid, cv=5)
clf.fit(X, y)
# fixed typo in the message: 'socre' -> 'score'
print('best param ：{0}\nbest score:{1}'.format(clf.best_params_, clf.best_score_))

'''
Plot the learning curves
'''
import time
from sklearn.model_selection import ShuffleSplit

def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    '''
    Plot a simple learning curve: mean train and cross-validation score
    versus the number of training examples, with +/- one std-dev bands.

    Parameters
    ----------
    estimator : object implementing the fit and predict methods

    title : str
        Title of the chart.

    X : array-like, shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum y-values plotted.

    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy.
        Possible inputs:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds,
          - an object to be used as a cross-validation generator,
          - an iterable yielding train/test splits.

    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).

    Returns
    -------
    The matplotlib.pyplot module, so callers can chain e.g. plt.show().
    '''
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel('Training examples')
    plt.ylabel('Score')
    # learning_curve is imported at the top of the file (was missing in the
    # original, which raised NameError here).
    train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    # Shade +/- one standard deviation around each mean-score curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color='r')
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1,
                     color='g')
    # Mean-score curves.
    plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')
    plt.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross-validation score')

    plt.legend(loc=0)
    return plt

# Plot the learning curve for the RBF-kernel SVC and time the run.
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
title = 'Learning Curves for Gaussian Kernel'

# time.clock() was removed in Python 3.8; perf_counter() is the replacement.
start = time.perf_counter()
plt.figure(figsize=(10, 4), dpi=80)
plot_learning_curve(SVC(C=1.0, kernel='rbf', gamma=0.01),
                    title, X, y, ylim=(0.5, 1.01), cv=cv)

# fixed typo in the message: 'elaspe' -> 'elapse'
print('elapse:{0:.6f}'.format(time.perf_counter() - start))

# Fit the model with a 2nd-degree polynomial kernel and compare
# accuracy on the training and test splits.
clf = SVC(C=1.0, kernel='poly', degree=2)
clf.fit(X_train, y_train)
train_score, test_score = (clf.score(X_train, y_train),
                           clf.score(X_test, y_test))
print('train score:{0}, test score:{1}'.format(train_score, test_score))

# Plot learning curves for the 1st- and 2nd-degree polynomial kernels
# side by side, and time the run.
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
title = 'Learning Curves with degree={0}'
degrees = [1, 2]

# time.clock() was removed in Python 3.8; perf_counter() is the replacement.
start = time.perf_counter()
plt.figure(figsize=(12, 4), dpi=80)
for i, degree in enumerate(degrees):
    plt.subplot(1, len(degrees), i + 1)
    plot_learning_curve(SVC(C=1.0, kernel='poly', degree=degree),
                        title.format(degree), X, y,
                        ylim=(0.8, 1.01), cv=cv, n_jobs=4)

# fixed typo in the message: 'elaspe' -> 'elapse'
print('elapse:{0:.6}'.format(time.perf_counter() - start))




