import time

import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston  # Boston housing dataset (removed in scikit-learn >= 1.2)
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import ShuffleSplit, learning_curve, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler

def plot_learning_curve(estimator,title,X,y,ylim=None,cv=None,
                       n_jobs=1,train_sizes=np.linspace(.1,1.0,5)):
    """Plot a learning curve: training vs. cross-validation score.

    Parameters
    ----------
    estimator : object implementing ``fit`` and ``predict``
        The model to evaluate.
    title : str
        Title of the chart.
    X : array-like, shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    ylim : tuple, shape (ymin, ymax), optional
        y-axis limits of the plot.
    cv : int, cross-validation generator or iterable, optional
        Cross-validation splitting strategy. Possible inputs:
          - None, to use the default 3-fold cross-validation,
          - an integer, to specify the number of folds,
          - an object to be used as a cross-validation generator,
          - an iterable yielding train/test splits.
    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).
    train_sizes : array-like, optional
        Relative (or absolute) numbers of training examples used to
        generate the curve.

    Returns
    -------
    The ``matplotlib.pyplot`` module, so the caller can keep customizing
    or save the figure.
    """
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel('Training examples')
    plt.ylabel('Score')

    sizes, tr_scores, val_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)

    tr_mean = np.mean(tr_scores, axis=1)
    tr_std = np.std(tr_scores, axis=1)
    val_mean = np.mean(val_scores, axis=1)
    val_std = np.std(val_scores, axis=1)

    plt.grid()

    # Shade a one-standard-deviation band around each mean score curve
    # (red = training, green = cross-validation).
    for mean, std, color in ((tr_mean, tr_std, 'r'), (val_mean, val_std, 'g')):
        plt.fill_between(sizes, mean - std, mean + std, alpha=0.1, color=color)

    # Mean score curves themselves.
    plt.plot(sizes, tr_mean, 'o-', color='r', label='Training score')
    plt.plot(sizes, val_mean, 'o-', color='g', label='Cross-validation score')

    plt.legend(loc=0)
    return plt

# NOTE(review): load_boston was removed in scikit-learn 1.2; on modern
# versions switch to fetch_california_housing or load the data manually.
boston = load_boston()
X = boston.data    # feature matrix
y = boston.target  # regression target (median house value)

# Hold out 20% of the samples for validation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y,
                        test_size=0.2, random_state=3)

# Baseline: plain linear regression.
model = LinearRegression()
# time.clock() was removed in Python 3.8; time.perf_counter() replaces it.
start = time.perf_counter()
model.fit(X_train, y_train)
train_score = model.score(X_train, y_train)  # R^2 on the training set
cv_score = model.score(X_test, y_test)       # R^2 on the held-out set
print('elapsed: {0:.6f}; train_score: {1:0.6f}; cv_score: {2:.6f}'.
      format(time.perf_counter() - start, train_score, cv_score))

#模型效果一般，进行模型优化
#欠拟合，增加多项式特征，增加模型的复杂度
def polynomial_model(degree=1):
    """Build a pipeline: polynomial feature expansion -> linear regression.

    Parameters
    ----------
    degree : int, optional
        Degree of the polynomial features (default 1, i.e. plain OLS on
        the raw features).

    Returns
    -------
    sklearn.pipeline.Pipeline
        Unfitted pipeline; call ``fit``/``score`` on it as usual.
    """
    polynomial_features = PolynomialFeatures(degree=degree,
                                             include_bias=False)
    # LinearRegression(normalize=True) was deprecated in scikit-learn 0.24
    # and removed in 1.2; the documented replacement is an explicit
    # StandardScaler step in the pipeline. OLS predictions are invariant
    # to feature scaling, so model scores are unchanged.
    pipeline = Pipeline([('polynomial_features', polynomial_features),
                         ('standard_scaler', StandardScaler()),
                         ('linear_regression', LinearRegression())])
    return pipeline

# Refit with degree-2 polynomial features (addresses the underfitting of
# the plain linear baseline by increasing model capacity).
model = polynomial_model(degree=2)
# time.clock() was removed in Python 3.8; time.perf_counter() replaces it.
start = time.perf_counter()
model.fit(X_train, y_train)
train_score = model.score(X_train, y_train)
cv_score = model.score(X_test, y_test)
print('\n模型优化后：\nelapsed: {0:.6f}; train_score: {1:0.6f}; cv_score: {2:.6f}'.
      format(time.perf_counter() - start, train_score, cv_score))

# Learning curves for degrees 1-3: 10 shuffled splits, 20% held out each.
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
plt.figure(figsize=(18, 4), dpi=70)
title = 'Learning Curves (degree={0})'
degrees = [1, 2, 3]

start = time.perf_counter()
for i, degree in enumerate(degrees):
    plt.subplot(1, len(degrees), i + 1)
    plot_learning_curve(polynomial_model(degree), title.format(degree),
                        X, y, ylim=(0.01, 1.01), cv=cv)
plt.show()
print('elapsed: {0:.6f}'.format(time.perf_counter() - start))