#-*- encoding:utf-8 -*-
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import learning_curve#学习曲线
from sklearn.model_selection import ShuffleSplit
from matplotlib import pyplot as plt

# Build a noisy square-root dataset: 200 evenly spaced samples on [0, 1]
# with uniform noise drawn from [-0.1, 0.1).
n_dots = 200
X = np.linspace(0, 1, n_dots)
noise = 0.2 * np.random.rand(n_dots) - 0.1
y = np.sqrt(X) + noise

# scikit-learn estimators expect an (n_samples, n_features) design matrix,
# so turn both 1-D arrays into 200x1 column vectors.
X = X.reshape(-1, 1)
y = y.reshape(-1, 1)

def polynomial_model(degree=1):
    """Build a polynomial-regression pipeline.

    Parameters
    ----------
    degree : int, default=1
        Order of the polynomial features fed into the linear model.

    Returns
    -------
    sklearn.pipeline.Pipeline
        A pipeline that first expands the input with polynomial features
        (no bias column) and then fits an ordinary least-squares model.
    """
    steps = [
        ('polynomial_features',
         PolynomialFeatures(degree=degree, include_bias=False)),
        ('linear_regression', LinearRegression()),
    ]
    return Pipeline(steps)

def plot_learning_curve(estimator,title,X,y,ylim=None,cv=None,
                       n_jobs=1,train_sizes=np.linspace(.1,1.0,5)):
    """Plot the training and cross-validation learning curves of an estimator.

    Parameters
    ----------
    estimator : object implementing the ``fit`` and ``predict`` methods.

    title : str
        Title of the chart.

    X : array-like, shape (n_samples, n_features)
        Training vectors, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Minimum and maximum values plotted on the y axis.

    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy. Possible inputs:
          - None, to use the default 3-fold cross-validation,
          - an integer, to specify the number of folds,
          - an object to be used as a cross-validation generator,
          - an iterable yielding train/test splits.

    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).

    Returns
    -------
    The ``matplotlib.pyplot`` module, so callers can customise further.
    """
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel('Training examples')
    plt.ylabel('Score')

    sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)

    mean_train = np.mean(train_scores, axis=1)
    std_train = np.std(train_scores, axis=1)
    mean_test = np.mean(test_scores, axis=1)
    std_test = np.std(test_scores, axis=1)

    plt.grid()

    # Shade a one-standard-deviation band around each mean score curve.
    plt.fill_between(sizes, mean_train - std_train, mean_train + std_train,
                     alpha=0.1, color='r')
    plt.fill_between(sizes, mean_test - std_test, mean_test + std_test,
                     alpha=0.1, color='g')

    # Mean score curves themselves.
    plt.plot(sizes, mean_train, 'o-', color='r', label='Training score')
    plt.plot(sizes, mean_test, 'o-', color='g', label='Cross-validation score')

    plt.legend(loc=0)
    return plt

# To smooth the learning curves, score on 10 shuffled splits, each time
# holding out a fresh 20% of the data for cross-validation.
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)

# Expected outcome: degree-1 underfits, degree-3 fits the data well,
# and degree-10 overfits.
titles = ['Learning Curves (Under Fitting)',
          'Learning Curves',
          'Learning Curves (Over Fitting)']
degrees = [1, 3, 10]

plt.figure(figsize=(18, 4), dpi=80)
for i, (degree, title) in enumerate(zip(degrees, titles)):
    plt.subplot(1, 3, i + 1)
    plot_learning_curve(polynomial_model(degree), title,
                        X, y, ylim=(0.75, 1.01), cv=cv)
plt.show()

'''
关于ShuffleSplit
class sklearn.model_selection.ShuffleSplit(n_splits=10,
        test_size='default', train_size=None, random_state=None)
参数：
n_splits:int, 划分训练集、测试集的次数，默认为10
test_size:float, int, None, default=0.1； 测试集比例或样本数量，该值为[0.0, 1.0]内的浮点数时，
    表示测试集占总样本的比例；该值为整型值时，表示具体的测试集样本数量；train_size不设定具体数值时，
    该值取默认值0.1，train_size设定具体数值时，test_size取剩余部分
train_size:float, int, None； 训练集比例或样本数量，该值为[0.0, 1.0]内的浮点数时，表示训练集占
    总样本的比例；该值为整型值时，表示具体的训练集样本数量；该值为None(默认值)时，训练集取总体样本除
    去测试集的部分
random_state:int, RandomState instance or None；随机种子值，默认为None
'''


