"""Feature engineering for the Boston housing data (RMSE-oriented, MinMaxScaler).

Original header strings: 'RMSE处理，加MinMaxScaler' / 'zhjsw'.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv('boston_housing.csv')
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)

# MEDV values of 50 are censored/capped in this dataset; drop those rows.
df = df[df.MEDV < 50]
y = df['MEDV']
X = df.drop('MEDV', axis=1)
log_y = np.log1p(y)  # log(1 + y) tames the right-skewed price target

# RAD is a categorical accessibility index: mark it as object, one-hot encode.
# BUG FIX: the original `X['RAD'].astype('object')` discarded its result
# (astype is not in-place); assign it back.
X['RAD'] = X['RAD'].astype('object')
x_cat = pd.get_dummies(X['RAD'], prefix='RAD')
# BUG FIX: the original did `X = df.drop('RAD', axis=1)`, which re-introduced
# the target MEDV into the feature matrix (and into the scaler's fit);
# drop RAD from X, not from df.
X = X.drop('RAD', axis=1)
feat_names = X.columns

from sklearn.preprocessing import MinMaxScaler

# Scale every numeric feature into [0, 1]; the one-hot RAD columns are
# concatenated afterwards so they stay as clean 0/1 indicators.
# (The original also created mm_y / mm_log_y scalers that were never used;
# removed here — the targets are stored unscaled.)
mm_X = MinMaxScaler()
X = mm_X.fit_transform(X)
fe_data = pd.DataFrame(data=X, columns=feat_names, index=df.index)
fe_data = pd.concat([fe_data, x_cat], axis=1, ignore_index=False)
# Keep both the raw and the log1p-transformed target alongside the features.
fe_data['MEDV'] = y
fe_data['log_MEDV'] = log_y
fe_data.to_csv('FE_MinMaxScaler_boston_housing.csv', index=False)
fe_data.info()  # info() prints directly and returns None; don't wrap in print()
print(fe_data.head())

# Reload the engineered dataset; the models below regress on log_MEDV.
df1 = pd.read_csv('FE_MinMaxScaler_boston_housing.csv')
X1 = df1.drop(['MEDV', 'log_MEDV'], axis=1)
y1 = df1['log_MEDV']
columns_names = X1.columns

# --- Ordinary least squares ---
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import LassoCV

# Hold out 20% of the samples for evaluation.
X_train, X_test, y_train, y_test = train_test_split(
    X1, y1, random_state=33, test_size=0.2)
lr = LinearRegression()
lr.fit(X_train, y_train)
pred_test_lr = lr.predict(X_test)
pred_train_lr = lr.predict(X_train)

# Rank the learned coefficients to see which features dominate.
fs = pd.DataFrame({'columns': list(columns_names), 'coef': list(lr.coef_.T)})
print(fs.sort_values(by=['coef'], ascending=False))
print('The rooted mean_squared_error score of LinearRegression on test is',
      sqrt(mean_squared_error(y_test, pred_test_lr)))
print('The rooted mean_squared_error score of LinearRegression on train is',
      sqrt(mean_squared_error(y_train, pred_train_lr)))

# --- Ridge regression with cross-validated alpha selection ---
alphas = [0.01, 0.1, 1, 10, 100]
# store_cv_values=True keeps the per-sample CV errors in ridge.cv_values_.
ridge = RidgeCV(alphas=alphas, store_cv_values=True)
ridge.fit(X_train, y_train)
pred_test_ridge = ridge.predict(X_test)
pred_train_ridge = ridge.predict(X_train)

# Compare ridge coefficients side by side with the OLS ones.
fs = pd.DataFrame({
    'columns': list(columns_names),
    'coef_lr': list(lr.coef_.T),
    'coef_ridge': list(ridge.coef_.T),
})
print(fs.sort_values(by=['coef_lr'], ascending=False))
print('The  rooted mean_squared_error score of RigdeCV on test is',
      sqrt(mean_squared_error(y_test, pred_test_ridge)))
print('The  rooted mean_squared_error score of RigdeCV on train is',
      sqrt(mean_squared_error(y_train, pred_train_ridge)))

# cv_values_ has one row per sample and one column per alpha (axis 0 runs
# down the rows): average over samples, then take sqrt to get RMSE per alpha.
rmse_per_alpha = np.sqrt(np.mean(ridge.cv_values_, axis=0))
plt.plot(np.log10(alphas), rmse_per_alpha.reshape(len(alphas), 1))
plt.xlabel('log(alpha)')
plt.ylabel('RMSE')
print('alpha is:', ridge.alpha_)
plt.show()

# --- Lasso regression with cross-validated alpha selection ---
lasso = LassoCV()
lasso.fit(X_train, y_train)
# BUG FIX: the original called ridge.predict() here, so every "lassoCV" RMSE
# it printed was actually the ridge model's score. Use the fitted lasso.
y_test_pred_lasso = lasso.predict(X_test)
y_train_pred_lasso = lasso.predict(X_train)

# Coefficients of all three models side by side; lasso zeroes out weak ones.
fs = pd.DataFrame({'columns': list(columns_names),
                   'coef_lr': list(lr.coef_.T),
                   'coef_ridge': list(ridge.coef_.T),
                   'coef_lasso': list(lasso.coef_.T)})
print(fs.sort_values(by=['coef_lr'], ascending=False))
print('The rooted mean_squared_error score of lassoCV on test is',
      sqrt(mean_squared_error(y_test, y_test_pred_lasso)))
print('The rooted mean_squared_error score of lassoCV on train is',
      sqrt(mean_squared_error(y_train, y_train_pred_lasso)))

# mse_path_ is (n_alphas, n_folds): average across folds (axis 1), then take
# the square root to get one CV RMSE per candidate alpha.
RMSE_means = np.sqrt(np.mean(lasso.mse_path_, axis=1))
plt.plot(np.log10(lasso.alphas_), RMSE_means)
plt.xlabel('log(alpha)')
plt.ylabel('RMSE')
print('alpha is:', lasso.alpha_)
plt.show()

