#第一步，数据准备
import pandas as pd
import numpy as np


# Load the Boston housing data and separate it into features (data) and target.
# In this file each record spans two physical lines: the even line holds the
# leading feature columns, the odd line holds the remaining two features
# followed by the target value (assumes the standard CMU layout — the slicing
# below depends on it).
data_url = "http://lib.stat.cmu.edu/datasets/boston"
# Raw string for the regex separator: "\s+" in a plain string is an invalid
# escape sequence (SyntaxWarning on Python 3.12+, error in future versions).
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
target = raw_df.values[1::2, 2]


# Bind features and target to the conventional names x and y
x, y = data, target


##第二步，训练与评估模型;导入3种模型及划分样本函数
from sklearn.linear_model import LinearRegression,Ridge,Lasso
from sklearn.model_selection import train_test_split                     
import matplotlib.pyplot as plt


# Split into training and test sets: 70%/30%, fixed random_state=1 so the
# split (and therefore every score printed below) is reproducible.
x_train,x_test,y_train,y_test=train_test_split(x,y,random_state=1,test_size=0.3)   


# Instantiate the three linear-regression variants under comparison
lr = LinearRegression()
rd = Ridge()
ls = Lasso()
names = ['Linear', 'Ridge', 'Lasso']
models = [lr, rd, ls]

# Fit each model on the training split and report its score on the test split
for name, model in zip(names, models):
    model.fit(x_train, y_train)
    print("%s模型的预测准确率为：%.5f" % (name, model.score(x_test, y_test)))


# Step 3: evaluate each model's test score across a range of alpha values.
# scores[i] holds one score per alpha for models[i], so the plotting code
# below can index it in lockstep with alphas.
scores = []
alphas = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 50]
for index, model in enumerate(models):
    if index == 0:
        # LinearRegression has no alpha parameter, so its score is invariant:
        # fit once and replicate the score instead of refitting 12 times.
        model.fit(x_train, y_train)
        scores.append([model.score(x_test, y_test)] * len(alphas))
        continue
    scores.append([])
    for alpha in alphas:
        # set_params is the idiomatic sklearn way to change a hyperparameter
        # before refitting
        model.set_params(alpha=alpha)
        model.fit(x_train, y_train)
        scores[index].append(model.score(x_test, y_test))


# Plot test score vs. alpha for each model and print each model's best score.
fig = plt.figure(figsize=(10, 7))
for i, name in enumerate(names):
    plt.subplot(2, 2, i + 1)
    plt.plot(range(len(alphas)), scores[i], 'r-')
    # Label ticks with the actual alpha values — bare indices 0..11 on the
    # x-axis are meaningless to a reader.
    plt.xticks(range(len(alphas)), alphas, rotation=45, fontsize=7)
    plt.xlabel('alpha')
    plt.ylabel('score')
    plt.title(name)
    print('%s模型的最大预测准确率为：%.5f' % (name, max(scores[i])))
plt.tight_layout()
plt.show()


#岭回归给出了最高的预测准确率
#较大的alpha参数会导致模型的复杂度下降,预测的准确率也会随之下降
#正则化回归算法的alpha值进行调整,最高预测准确率均高于简单线性回归的预测准确率




