from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split,ShuffleSplit,learning_curve
from sklearn.linear_model import LogisticRegression
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
import time
import matplotlib.pyplot as plt

# Load the breast-cancer dataset (binary classification: 1 = positive/benign
# label in this dataset's encoding, 0 = negative).
cancer = load_breast_cancer()
X = cancer.data
y = cancer.target
# FIX: the original string read "np.negative" — a typo for "no.negative".
print('data shape:{0}, no.positive:{1}, no.negative:{2}'.
      format(X.shape, y[y == 1].size, y[y == 0].size))

# Hold out 20% of the samples as a test set.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Fit a plain logistic-regression baseline on the training split.
model = LogisticRegression()
model.fit(X_train, y_train)

# Accuracy on the data the model saw vs. the held-out test set.
train_score = model.score(X_train, y_train)
test_score = model.score(X_test, y_test)
print('\ntrain_score: {train_score:.6f}; test_score: {test_score:.6f}'.
      format(train_score=train_score, test_score=test_score))

# Predicted labels for the test set (shown here for illustration).
y_pred = model.predict(X_test)
# Per-class probabilities; used below to locate low-confidence predictions.
y_pred_proba = model.predict_proba(X_test)
print('\nsample of predict probability: {0}'.format(y_pred_proba[0]))

# A prediction is "low confidence" when neither class probability reaches 0.9,
# i.e. both P(class 0) and P(class 1) exceed 0.1.
# Step 1: keep rows whose first column (P(negative)) is above 0.1.
negative_mask = y_pred_proba[:, 0] > 0.1   # boolean mask over test samples
result = y_pred_proba[negative_mask]
# Step 2: of those, keep rows whose second column (P(positive)) is above 0.1.
positive_mask = result[:, 1] > 0.1
print('\n下列是预测自信度低于90%的样本')
print(result[positive_mask])

# Model optimization: add a polynomial-feature preprocessing step.
def polynomial_model(degree=1, **kwarg):
    """Build a Pipeline of PolynomialFeatures followed by LogisticRegression.

    Parameters
    ----------
    degree : int, default 1
        Degree of the polynomial feature expansion (bias column excluded).
    **kwarg
        Forwarded verbatim to ``LogisticRegression`` (e.g. ``penalty``,
        ``solver``, ``C``).

    Returns
    -------
    sklearn.pipeline.Pipeline
        Unfitted pipeline with steps 'polynomial_features' and
        'logistic_regression'.
    """
    # FIX: modern scikit-learn's default solver (lbfgs) rejects penalty='l1'.
    # Default to liblinear — the historical sklearn default, which supports
    # L1 — so existing callers keep working.  An explicit solver kwarg from
    # the caller is never overridden.
    if kwarg.get('penalty') == 'l1':
        kwarg.setdefault('solver', 'liblinear')
    polynomial_features = PolynomialFeatures(degree=degree, include_bias=False)
    logistic_regression = LogisticRegression(**kwarg)
    pipeline = Pipeline([('polynomial_features', polynomial_features),
                         ('logistic_regression', logistic_regression)])
    return pipeline

# L1 regularization drives many coefficients to exactly zero, effectively
# performing automatic feature selection on the expanded polynomial features.
# solver='liblinear' is passed explicitly because penalty='l1' is not
# supported by the modern default solver (lbfgs).
model = polynomial_model(degree=2, penalty='l1', solver='liblinear')

# FIX: time.clock() was removed in Python 3.8; perf_counter() is the
# documented replacement for wall-clock interval timing.
start = time.perf_counter()
model.fit(X_train, y_train)
train_score = model.score(X_train, y_train)
cv_score = model.score(X_test, y_test)
print('\nelaspe:{0:.6f}, train_score:{1:.6f}, cv_score:{2:.6f}'.
      format(time.perf_counter() - start, train_score, cv_score))

logistic_regression = model.named_steps['logistic_regression']
print('\nmodel parameters shape:{0}, count of non-zero element:{1}'.
      format(logistic_regression.coef_.shape,
             np.count_nonzero(logistic_regression.coef_)))
# coef_ holds the fitted model parameters.  Degree-2 polynomial expansion
# grows the 30 input features to 495; L1 regularization discards most of
# them, keeping only a small set of non-zero (informative) coefficients.

# Learning curves
def plot_learning_curve(estimator,title,X,y,ylim=None,cv=None,
                       n_jobs=1,train_sizes=np.linspace(.1,1.0,5)):
    '''
    Plot training-score and cross-validation-score curves versus the
    number of training examples.

    Parameters
    ----------
    estimator : object implementing fit and predict.

    title : str
        Title of the chart.

    X : array-like, shape (n_samples, n_features)
        Training vectors; n_samples is the number of samples and
        n_features the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Targets relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Minimum and maximum y-values to plot.

    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy. Possible inputs:
          - None, to use the default 3-fold cross-validation,
          - an integer, to specify the number of folds,
          - an object to be used as a cross-validation generator,
          - an iterable yielding train/test splits.

    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).
    '''
    # Compute the curves first; plotting only styles the current figure.
    sizes, scores_train, scores_test = learning_curve(
            estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    mean_train = np.mean(scores_train, axis=1)
    std_train = np.std(scores_train, axis=1)
    mean_test = np.mean(scores_test, axis=1)
    std_test = np.std(scores_test, axis=1)

    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel('Training examples')
    plt.ylabel('Score')
    plt.grid()

    # Shade a +/- one standard deviation band around each mean curve.
    plt.fill_between(sizes, mean_train - std_train, mean_train + std_train,
                     alpha=0.1, color='r')
    plt.fill_between(sizes, mean_test - std_test, mean_test + std_test,
                     alpha=0.1, color='g')
    # Mean accuracy curves.
    plt.plot(sizes, mean_train, 'o-', color='r', label='Training score')
    plt.plot(sizes, mean_test, 'o-', color='g', label='Cross-validation score')

    plt.legend(loc=0)
    return plt

# 10 random shuffle-splits, each holding out 20% for validation.
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
title = 'Learning Curves (degree={0}, penalty={1})'
degrees = [1, 2]  # first- and second-degree polynomial features

# L1-regularized models.  FIX: solver='liblinear' is passed explicitly
# because penalty='l1' is rejected by the modern default solver (lbfgs).
penalty = 'l1'
plt.figure(figsize=(12, 4), dpi=50)
for i, degree in enumerate(degrees):
    plt.subplot(1, len(degrees), i + 1)
    plot_learning_curve(polynomial_model(degree=degree, penalty=penalty,
                                         solver='liblinear'),
                        title.format(degree, penalty),
                        X, y,
                        ylim=(0.8, 1.01),
                        cv=cv)
plt.show()

# L2-regularized models for comparison.
# FIX: the original set penalty = 'l1' here despite the "L2 norm" comment,
# so both figures were identical; 'l2' is what was intended.  liblinear
# (the historical sklearn default solver) supports L2 as well.
penalty = 'l2'
plt.figure(figsize=(12, 4), dpi=50)
for i, degree in enumerate(degrees):
    plt.subplot(1, len(degrees), i + 1)
    plot_learning_curve(polynomial_model(degree=degree, penalty=penalty,
                                         solver='liblinear'),
                        title.format(degree, penalty),
                        X, y,
                        ylim=(0.8, 1.01),
                        cv=cv)
plt.show()

