from sklearn.datasets import load_boston
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split,GridSearchCV
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# Ridge regression on the Boston housing data, tuned by a grid search over
# the polynomial-expansion degree and the ridge penalty.
# NOTE(review): sklearn.datasets.load_boston was removed in scikit-learn 1.2;
# this script needs an older scikit-learn — confirm the pinned version.
boston = load_boston()

X_train, X_test, y_train, y_test = train_test_split(
    boston.data, boston.target, random_state=0)

# Pipeline: polynomial feature expansion -> min-max scaling -> ridge fit.
pipe = Pipeline([
    ('poly', PolynomialFeatures(include_bias=False)),
    ("scaler", MinMaxScaler()),
    ('ridge', Ridge()),
])

# Search jointly over expansion degree and regularization strength,
# with 5-fold cross-validation on the training split.
param_grid = {
    'poly__degree': [1, 2, 3],
    'ridge__alpha': [0.001, 0.01, 0.1, 1, 10, 100],
}
grid = GridSearchCV(pipe, param_grid=param_grid, cv=5)
grid.fit(X_train, y_train)

print("岭回归精度")
print(f"最好的参数组合:{grid.best_params_}")
print(f"最好的交叉验证集的精度:{grid.best_score_}")
print(grid.score(X_test, y_test))
print("模型的前10个系数", grid.best_estimator_.named_steps['ridge'].coef_[:10])
# Decision tree regression
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn import datasets
from sklearn.metrics import mean_squared_error,explained_variance_score
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
# Reload the housing data and shuffle it first, so the split below does not
# depend on the original record order (fixed seed keeps the run reproducible).
housing_data = datasets.load_boston()
X, y = shuffle(housing_data.data, housing_data.target, random_state=7)

# Hold out the last 20% of the shuffled rows as the test set.
split = int(0.8 * len(X))
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]
# Fit a single regression tree capped at depth 4 and report its test error.
dt_regressor = DecisionTreeRegressor(max_depth=4)
dt_regressor.fit(X_train, y_train)
y_pred_dt = dt_regressor.predict(X_test)

print("### Decision Tree performance ####")
print("mean squared error=", round(mean_squared_error(y_test, y_pred_dt), 2))
print("Explained variance score=", round(explained_variance_score(y_test, y_pred_dt), 2))

# Boost 400 depth-4 trees with AdaBoost and report the same two test metrics
# for comparison against the single tree above.
ab_regressor = AdaBoostRegressor(
    DecisionTreeRegressor(max_depth=4), n_estimators=400, random_state=7)
ab_regressor.fit(X_train, y_train)
y_pred_ab = ab_regressor.predict(X_test)

mse = mean_squared_error(y_test, y_pred_ab)
evs = explained_variance_score(y_test, y_pred_ab)
print("### AdaBoost Performance ###")
print("Mean squared error = ", round(mse, 2))
print("Explained variance score=", round(evs, 2))
# Plot the decision tree's relative feature importances, most important first.
# BUG FIX: the normalized values were assigned to a misspelled name (DtfImp)
# and never used, so the chart plotted raw importances while the y-axis label
# promised the 0-100 "Relative Importance" scale (the AdaBoost chart below
# normalizes correctly).
DTFImp = dt_regressor.feature_importances_
DTFImp = 100 * (DTFImp / max(DTFImp))        # scale so the top feature reads 100
index_sorted = np.flipud(np.argsort(DTFImp))  # indices in descending importance
pos = np.arange(index_sorted.shape[0]) + 0.5  # bar centers

plt.figure()
plt.bar(pos, DTFImp[index_sorted], align='center')
plt.xticks(pos, housing_data.feature_names[index_sorted])
plt.ylabel("Relative Importance")
plt.title("Decision Tree Regression")
plt.show()

# Same relative-importance chart for the AdaBoost ensemble: importances are
# scaled so the most important feature reads 100, then plotted in descending
# order.
ab_importances = ab_regressor.feature_importances_
ab_importances = 100 * (ab_importances / max(ab_importances))
order = np.flipud(np.argsort(ab_importances))
centers = np.arange(order.shape[0]) + 0.5
print(order.shape[0])

plt.figure()
plt.bar(centers, ab_importances[order], align='center')
plt.xticks(centers, housing_data.feature_names[order])
plt.ylabel('Relative Importance')
plt.title("AdaBoost regressor")
plt.show()