import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
# Load the feature-engineered Boston housing dataset.
df = pd.read_csv('FE_boston_housing.csv')

# Show every row/column when printing DataFrames.
for opt in ('display.max_columns', 'display.max_rows'):
    pd.set_option(opt, None)

# Target is the log-transformed median house value; drop both target
# columns so they cannot leak into the feature matrix.
y = df['log_MEDV']
X = df.drop(columns=['MEDV', 'log_MEDV'])
feat_names = X.columns

from sklearn.model_selection import train_test_split

# Hold out 20% of the samples for testing (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=33)
print(X_train.shape)


'''Baseline model: ordinary least-squares linear regression.'''
from sklearn.linear_model import LinearRegression

# Fit a plain linear model with default settings (fit returns the
# estimator itself, so construction and training chain into one line).
lr = LinearRegression().fit(X_train, y_train)

# Predictions on both splits, kept for the evaluation section below.
y_test_pred_lr = lr.predict(X_test)
y_train_pred_lr = lr.predict(X_train)

# Inspect the learned weights: the magnitude of a coefficient can be read
# as a rough importance of its feature ("coef" = coefficient, the W_j's).
fs = pd.DataFrame({'columns': list(feat_names), 'coef': list(lr.coef_.T)})
# sort_values: `by` picks the sort column, `ascending=False` -> descending.
print(fs.sort_values(by=['coef'], ascending=False))



'''Model evaluation: R^2 score on the held-out and training splits.'''
print(f'The r2 score of LinearRegression on test is {r2_score(y_test, y_test_pred_lr)}')
print(f'The r2 score of LinearRegression on train is {r2_score(y_train, y_train_pred_lr)}')



'''Inspect the training-set residuals: under the model assumptions the
noise should be zero-mean Gaussian, so the histogram should look like a
centered bell curve.'''
fig, ax = plt.subplots(figsize=(7, 5))
fig.tight_layout()  # auto-adjust subplot params to fill the figure area
ax.hist(y_train - y_train_pred_lr, bins=40,
        label="Residuals Linear", color='g', alpha=0.5)  # alpha = transparency
ax.set_title('Histogram of Residuals')
ax.legend(loc='best')
plt.show()

# Also look at predicted vs. true values as a scatter plot.
plt.figure(figsize=(4, 3))
plt.scatter(y_train, y_train_pred_lr)
plt.plot([1, 3], [1, 3], '--r')  # ideal y = x reference line
plt.axis('tight')
plt.xlabel('True price')
plt.ylabel('Predicted price')
plt.tight_layout()
plt.show()


'''Ridge regression with built-in cross-validation over alpha.'''
from sklearn.linear_model import RidgeCV

alphas = [0.01, 0.1, 1, 10, 100]
# store_cv_values=True keeps the per-sample CV errors in ridge.cv_values_
# for the plot below.
# NOTE(review): this keyword was renamed `store_cv_results` in newer
# scikit-learn releases — confirm against the installed version.
ridge = RidgeCV(alphas=alphas, store_cv_values=True)
ridge.fit(X_train, y_train)

y_test_pred_ridge = ridge.predict(X_test)
y_train_pred_ridge = ridge.predict(X_train)

# Evaluate on both splits.
print('The r2 score of RigdeCV on test is', r2_score(y_test, y_test_pred_ridge))
print('The r2 score of RigdeCV on train is', r2_score(y_train, y_train_pred_ridge))

# Visualise the mean CV error per alpha.  Axis 0 runs down the rows
# (samples), so the mean is taken over samples for each alpha.
MSE_mean = np.mean(ridge.cv_values_, axis=0)
plt.plot(np.log10(alphas), MSE_mean.reshape(len(alphas), 1))
plt.xlabel('log(alpha)')
plt.ylabel('MSE')
print('alpha is:', ridge.alpha_)
plt.show()

# Compare feature weights of OLS vs. ridge; |coef| is a rough importance.
fs = pd.DataFrame({'columns': list(feat_names),
                   'coef_lr': list(lr.coef_.T),
                   'coef_ridge': list(ridge.coef_.T)})
print(fs.sort_values(by=['coef_lr'], ascending=False))




'''Lasso regression with built-in cross-validation over alpha.'''
from sklearn.linear_model import LassoCV

# LassoCV selects the best alpha by cross-validation; the per-fold MSE
# along the regularisation path is stored in lasso.mse_path_.
lasso = LassoCV()
lasso.fit(X_train, y_train)

# BUG FIX: the original code predicted with the *ridge* model here, so the
# "lassoCV" scores below actually re-evaluated ridge.  Use the lasso model.
y_test_pred_lasso = lasso.predict(X_test)
y_train_pred_lasso = lasso.predict(X_train)

# Evaluate on both splits.
print('The r2 score of lassoCV on test is', r2_score(y_test, y_test_pred_lasso))
print('The r2 score of lassoCV on train is', r2_score(y_train, y_train_pred_lasso))

# Visualise the mean CV error along the path.  mse_path_ has one row per
# alpha and one column per fold, so axis=1 averages over the folds.
MSE_means = np.mean(lasso.mse_path_, axis=1)
plt.plot(np.log10(lasso.alphas_), MSE_means)
plt.xlabel('log(alpha)')
plt.ylabel('MSE')
print('alpha is:', lasso.alpha_)
plt.show()

# Feature weights of all three models side by side; |coef| is a rough
# importance (lasso drives some weights exactly to zero).
fs = pd.DataFrame({'columns': list(feat_names),
                   'coef_lr': list(lr.coef_.T),
                   'coef_ridge': list(ridge.coef_.T),
                   'coef_lasso': list(lasso.coef_.T)})
print(fs.sort_values(by=['coef_lr'], ascending=False))

