"""
Created on 2018/4/22 17:47 星期日
@author: Matt  zhuhan1401@126.com
Description: 使用线性回归预测波士顿房价
"""

import time

from matplotlib import pyplot as plt
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, ShuffleSplit
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler  # polynomial feature expansion + scaling

from commonTool.plotCurve import plot_learning_curve


def polyNomialModel(degree=1):
    """Build a polynomial regression pipeline.

    Steps: polynomial feature expansion -> feature standardization ->
    ordinary least squares.

    :param degree: degree of the polynomial feature expansion
                   (degree=1 is plain linear regression).
    :return: an unfitted sklearn ``Pipeline``.
    """
    # include_bias=False: LinearRegression fits its own intercept, so the
    # constant column PolynomialFeatures would add is redundant.
    polyNomialFeatures = PolynomialFeatures(degree=degree, include_bias=False)
    # StandardScaler replaces LinearRegression(normalize=True), which was
    # deprecated in scikit-learn 1.0 and removed in 1.2. For OLS, scaling the
    # inputs does not change the predictions, only the conditioning/speed.
    linearRegression = LinearRegression()
    pipeline = Pipeline([
        ("polynormal_features", polyNomialFeatures),
        ("scaler", StandardScaler()),
        ("linear_regression", linearRegression),
    ])
    return pipeline


# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 (ethical concerns about feature "B"). On sklearn >= 1.2 the data must be
# fetched from http://lib.stat.cmu.edu/datasets/boston (or switch the demo to
# fetch_california_housing) — confirm which sklearn version this project pins.
boston = load_boston()
X = boston.data    # shape (506, 13)
Y = boston.target  # shape (506,)
# print(boston.feature_names)  # feature column names

# Routine 80/20 train/test split.
XTrain, XTest, YTrain, YTest = train_test_split(X, Y, test_size=0.2, random_state=3)

# Baseline: plain linear regression. The original passed normalize=True, which
# was removed from LinearRegression in scikit-learn 1.2; for OLS it only
# rescaled inputs internally and did not change predictions, so dropping it
# preserves the scores below.
model = LinearRegression()
# time.clock() was removed in Python 3.8; perf_counter() is the replacement.
start = time.perf_counter()
model.fit(XTrain, YTrain)
trainScore = model.score(XTrain, YTrain)
CVScore = model.score(XTest, YTest)
print('elaspe:{0:0.6f};trainScore:{1:0.6f};CVScore:{2:0.6f};'.format(time.perf_counter() - start, trainScore, CVScore))
# elaspe:0.001877;  trainScore:0.723941;  CVScore:0.794958;  -> mediocre fit

# Optimization — observations:
# 1. Feature ranges differ widely (~0.001 to ~100), so normalize/standardize.
#    This speeds up convergence but cannot by itself improve accuracy:
#    elaspe:0.002381;trainScore:0.723941;CVScore:0.794958;  ...even slower here
# 2. trainScore is low, i.e. the model underfits the training data.
#    Therefore --> 1) mine more input features  2) add polynomial features,
#    i.e. increase model complexity.
modelWithPoly = polyNomialModel(degree=2)
start = time.perf_counter()
modelWithPoly.fit(XTrain, YTrain)
trainScoreWithPoly = modelWithPoly.score(XTrain, YTrain)
CVScoreWithPoly = modelWithPoly.score(XTest, YTest)
print('elaspeWithPoly:{0:0.6f};trainScoreWithPoly:{1:0.6f};CVScoreWithPoly:{2:0.6f};'.format(time.perf_counter() - start,
                                                                                             trainScoreWithPoly,
                                                                                             CVScoreWithPoly))
# elaspeWithPoly:0.023148;  trainScoreWithPoly:0.930547;  CVScoreWithPoly:0.860465;

# Plot learning curves for polynomial degrees 1..3.
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
degrees = [1, 2, 3]
title = 'Learning Curves (degree={0})'
# Single figure — the original called plt.figure() twice, leaving an extra
# empty figure window.
plt.figure(figsize=(18, 4), dpi=200)
for i, degree in enumerate(degrees):
    plt.subplot(1, len(degrees), i + 1)
    plot_learning_curve(polyNomialModel(degree), title.format(degree), X, Y, ylim=(0.01, 1.01), cv=cv)
plt.show()

