from sklearn.datasets import load_boston
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import r2_score
# ① Load the Boston housing dataset and extract features / labels.
# NOTE: load_boston was removed in scikit-learn 1.2; fall back to the
# replacement documented in sklearn's deprecation message, which rebuilds
# the same arrays from the original CMU source file (requires network).
# NOTE(review): the top-of-file `from sklearn.datasets import load_boston`
# must also be guarded or removed on sklearn >= 1.2 — confirm environment.
try:
    from sklearn.datasets import load_boston
    data = load_boston()
    x = data.data
    y = data.target
except ImportError:
    raw = pd.read_csv("http://lib.stat.cmu.edu/datasets/boston",
                      sep=r"\s+", skiprows=22, header=None)
    # Each record spans two physical lines: even rows hold the first 11
    # features, odd rows hold the last 2 features plus the target (MEDV).
    even = raw.iloc[::2].reset_index(drop=True)
    odd = raw.iloc[1::2].reset_index(drop=True)
    x = pd.concat([even, odd.iloc[:, :2]], axis=1).values
    y = odd.iloc[:, 2].values

# ① Reduce the features to 4 dimensions with PCA.
# ② Min-max normalize the reduced features to [0, 1].
# NOTE(review): the pipeline is fit on the FULL dataset before the
# train/test split below, so test data leaks into the PCA/scaler fit —
# acceptable for a demo, but fit on the training split only in real work.
pipi = Pipeline([('a', PCA(n_components=4)),
                 ('b', MinMaxScaler())])

x = pipi.fit_transform(x)
print(x.shape)  # expect (506, 4)

# ① Split the data into a training set and a test set (30% held out).
train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.3)

# ② Grid-search the L1 (Lasso) regularisation strength over
#    {0.1, 0.5, 0.8} with 5-fold cross-validation.
#    (The original comment calls these "learning rates"; for Lasso the
#    tuned parameter is the regularisation strength `alpha`.)
param_grid = {'alpha': [0.1, 0.5, 0.8]}
gid = GridSearchCV(Lasso(), param_grid, cv=5)
gid.fit(train_x, train_y)

# ③ Report the best hyper-parameter and its cross-validated score.
print(gid.best_params_)
print(gid.best_score_)
# ④ Rebuild the model with the alpha actually chosen by the grid search,
#    instead of a hard-coded 0.1 (the original silently ignored the search
#    result whenever a different alpha won).
l1 = Lasso(alpha=gid.best_params_['alpha'])
l1.fit(train_x, train_y)

# Print the R^2 score on the held-out test set.
test_h = l1.predict(test_x)
print(r2_score(test_y, test_h))

# Intercept of the fitted model.
print(l1.intercept_)
# Coefficients (weights), one per PCA component.
print(l1.coef_)