
# coding: utf-8

# # HW1_ZTT_AME_House Linear Regression Analysis

# In[59]:


# Core scientific stack: pandas/numpy for tabular data, matplotlib/seaborn for plots.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Notebook export artifact: render matplotlib figures inline in the notebook.
get_ipython().run_line_magic('matplotlib', 'inline')


# In[60]:


# Load the pre-engineered train/test feature files produced upstream.
train = pd.read_csv('train_fin_ztt.csv')
test = pd.read_csv('test_fin_ztt.csv')

# Quick structural overview: dtypes and non-null counts per column.
train.info()


# In[61]:


# Pull out the target and the row identifiers, then keep only the
# predictor columns in the feature matrices.
train_id = train['Id'].values
test_id = test['Id']
y_train = train['SalePrice'].values

X_train = train.drop(columns=['SalePrice', 'Id'])
X_test = test.drop(columns=['Id'])
X_train.shape


# Data standardization

# In[62]:


from sklearn.preprocessing import StandardScaler

# Fit the scaler on the training features only, then apply the same
# transform to the test features (avoids leaking test statistics).
ss_X = StandardScaler()
X_train = ss_X.fit_transform(X_train)
X_test = ss_X.transform(X_test)

# Standardize the target by hand, keeping the statistics so that
# predictions can be mapped back to the original price scale later.
mean_y, std_y = y_train.mean(), y_train.std()
y_train = (y_train - mean_y) / std_y


# In[63]:


# Ordinary least-squares baseline model.
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

lr = LinearRegression()
lr.fit(X_train, y_train)

# In-sample and test-set predictions (both in standardized target units).
y_train_pred_lr = lr.predict(X_train)
y_test_pred_lr = lr.predict(X_test)

print('The r2 score of LinearRegression on train is', r2_score(y_train, y_train_pred_lr))


# In[64]:


# Histogram of the OLS training residuals — a roughly symmetric,
# zero-centered shape suggests an adequate linear fit.
fig, ax = plt.subplots(figsize=(7, 5))
fig.tight_layout()
ax.hist(y_train - y_train_pred_lr, bins=40, label='Residuals Linear', color='b', alpha=.5)
ax.set_title("AMES_House of Residuals")
ax.legend(loc='best')


# The fitted model looks reasonably good.

# In[65]:


# Same linear model, but fit with stochastic gradient descent.
from sklearn.linear_model import SGDRegressor

sgdr = SGDRegressor(max_iter=1000)
sgdr.fit(X_train, y_train)

# .score() reports R^2 on the training data (the default metric).
print('The value of default measurement of SGDRegressor on train is', sgdr.score(X_train, y_train))


# This seems slightly better than the ordinary least-squares result.

# In[66]:


# Ridge regression (L2 regularization) with built-in cross-validation
# over a coarse alpha grid.
from sklearn.linear_model import RidgeCV

alphas = [0.1, 1, 10, 100, 1000, 10000]

# NOTE(review): store_cv_values / cv_values_ were renamed in newer
# scikit-learn releases (store_cv_results / cv_results_) — confirm the
# installed version still accepts this spelling.
ridge = RidgeCV(alphas=alphas, store_cv_values=True)
ridge.fit(X_train, y_train)

y_train_pred_ridge = ridge.predict(X_train)
y_test_pred_ridge = ridge.predict(X_test)

print('The r2 score of RidgeCV on train is', r2_score(y_train, y_train_pred_ridge))


# In[67]:


# Mean leave-one-out MSE per alpha: cv_values_ is (n_samples, n_alphas),
# so averaging over axis 0 already yields a 1-D array of length
# len(alphas) — the original .reshape(len(alphas), 1) was a redundant
# no-op and has been removed.
mse_mean = np.mean(ridge.cv_values_, axis=0)
plt.plot(np.log10(alphas), mse_mean)

plt.xlabel('log(alpha)')
plt.ylabel('mse')
plt.show()

print ('alpha is:', ridge.alpha_)


# This regularization hyperparameter is rather large... possibly related to the high feature dimensionality.

# In[68]:


# Lasso regression (L1 regularization) with cross-validated alpha selection.
from sklearn.linear_model import LassoCV

alphas = [0.001, 0.01, 0.1, 1, 10, 100]

lasso = LassoCV(alphas=alphas)
lasso.fit(X_train, y_train)

y_train_pred_lasso = lasso.predict(X_train)
y_test_pred_lasso = lasso.predict(X_test)

print('The r2 score of LassoCV on train is', r2_score(y_train, y_train_pred_lasso))

# mse_path_ is (n_alphas, n_folds): average over folds and plot the
# CV error curve against log10(alpha).
mses = np.mean(lasso.mse_path_, axis=1)
plt.plot(np.log10(lasso.alphas_), mses)

plt.xlabel('log(alpha)')
plt.ylabel('mse')
plt.show()

print ('alpha is:', lasso.alpha_)


# Why is the lasso regularization so small — very different from the L2 one?
# Write the test-set predictions out to a file.

# In[69]:


# Map the standardized predictions back to the original SalePrice scale.
y_test_pred_lr = y_test_pred_lr * std_y + mean_y        # ordinary least squares
y_test_pred_ridge = y_test_pred_ridge * std_y + mean_y  # ridge (L2)
y_test_pred_lasso = y_test_pred_lasso * std_y + mean_y  # lasso (L1)

# One row per test house: Id plus each model's predicted price.
# Building from a dict is clearer (and column-named up front) than the
# original pd.DataFrame([...]).T + columns-rename dance.
y_test = pd.DataFrame({
    'y_test_pred_lr': y_test_pred_lr,
    'y_test_pred_ridge': y_test_pred_ridge,
    'y_test_pred_lasso': y_test_pred_lasso,
})
y_test = pd.concat([test_id, y_test], axis=1)

# index=False: without it, to_csv prepends a meaningless unnamed
# positional-index column to the output file.
y_test.to_csv('HW1_AMES_House_pre.csv', index=False)

