#!/usr/bin/env python
# coding: utf-8

# In[8]:


import numpy as np
import pandas as pd
from sklearn.metrics import r2_score   #评价回归预测模型的的性能
import matplotlib.pyplot as plt    #画图
import seaborn as sns

get_ipython().run_line_magic('matplotlib', 'inline')


# In[9]:


# Load the bike-sharing dataset.
# Directory where the data file lives.
dpath = 'D:/0CSDN/机器学习-人工智能直通车/homework/4th week 线性回归/'
data = pd.read_csv(dpath + "day.csv")
# Peek at the first rows to sanity-check the load.
data.head()


# # 数据准备

# ## 数据去噪

# In[10]:


# NOTE(review): this disabled de-noising step appears to be carried over from a
# different example — the dataframe in this script is `data`, not `df`, and the
# threshold 50 does not fit this dataset's cnt range; verify before enabling.
# Drop samples with y >= 50 (keep samples where y < 50)
#df = df[df.cnt< 50]

# Print sample count and feature dimensionality
#print(df.shape)


# # 数据分离

# In[12]:


# Split the raw table into the input features X and the target y.
y = data['cnt'].values
X = data.drop(columns='cnt')

# Also keep a log-transformed target (log1p handles zero counts safely);
# later cells experiment with estimating the log of the ride counts.
log_y = np.log1p(y)


# #### 题目1.对连续型特征，可以用seabon 和matplotlib 可视化其分布

# In[13]:


# Univariate analysis: histogram + KDE of the target
# (total rental count per day).
fig = plt.figure()
# FIX: sns.distplot is deprecated (removed in seaborn >= 0.14);
# histplot with stat="density" + kde=True reproduces the old default output.
sns.histplot(data.cnt.values, bins=30, kde=True, stat="density")
plt.xlabel('Values of bicycle cnt', fontsize=20)
plt.show()


# #### 题目2.对于两个连续型特征，可以用corr得到两个特征的相关性

# In[14]:


# Pairwise feature correlation matrix.
# FIX: numeric_only=True — in pandas >= 2.0, corr() raises a TypeError on the
# non-numeric 'dteday' (date string) column; older pandas silently skipped it.
data_corr = data.corr(numeric_only=True)
sns.heatmap(data_corr, annot=True)             # heatmap of the correlations


# #### 题目3：特征之间存在强相关性，在选择线性回归模型时应该进行PCA降维（特征层面）或者加正则项（模型层面）。

# ## 去除干扰项

# In[15]:


# Remove columns that should not feed the regression:
#  - dteday: raw date string (non-numeric);
#  - season/yr/mnth/holiday/weathersit: categorical calendar fields dropped here;
#  - casual/registered: they sum to cnt, so keeping them would leak the target.
_dropped = [
    "dteday",
    "season",
    "yr",
    "mnth",
    "holiday",
    "weathersit",
    "casual",
    "registered",
]
X = X.drop(_dropped, axis=1)
# Remember the surviving feature names so coefficient tables below can be labeled.
columns = X.columns


# # 特征工程

# ## 题目4： 数值特征标准化（去量纲化）

# In[17]:


from sklearn.preprocessing import MinMaxScaler

# Scale features and targets into [0, 1] (min-max normalization).
ss_x = MinMaxScaler()
ss_y = MinMaxScaler()
# BUG FIX: the original reused ss_y for log_y, so the second fit_transform
# overwrote the parameters fitted on y — ss_y.inverse_transform would then
# invert log_y's scaling, not y's. Use a dedicated scaler per target.
ss_log_y = MinMaxScaler()

# Fit on the data and transform it in one step.
X = ss_x.fit_transform(X)

# MinMaxScaler requires 2-D input, hence the reshape to a column vector.
y = ss_y.fit_transform(y.reshape(-1, 1))
log_y = ss_log_y.fit_transform(log_y.reshape(-1, 1))


# ## 分割测试集和训练集

# In[18]:


# Split the data into training and test sets.
from sklearn.model_selection import train_test_split

# Hold out a random 20% of the samples for testing; the fixed
# random_state (i.e. the random seed) makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=3
)
X_train.shape


# ## 最小二乘

# In[19]:


from sklearn.linear_model import LinearRegression

# Ordinary least squares with default configuration.
lr = LinearRegression()

# Fit the model parameters on the training split.
lr.fit(X_train, y_train)

# Predict on both splits (used for the R^2 comparison below).
y_test_pred_lr = lr.predict(X_test)
y_train_pred_lr = lr.predict(X_train)

# Feature weights: |coefficient| is a rough proxy for feature importance.
# FIX: y_train is a column vector, so lr.coef_ has shape (1, n_features);
# ravel() flattens it so each DataFrame cell is a scalar instead of a
# length-1 array (which made sort_values ordering unreliable).
fs = pd.DataFrame({"columns": list(columns), "coef": lr.coef_.ravel()})
fs.sort_values(by=['coef'], ascending=False)


# ### 模型评价

# In[108]:


# Report R^2 for OLS on both splits (1.0 = perfect fit).
# FIX: corrected the "iof" typo in the printed messages.
print('The r2 score of Linear_Regression on test is ', r2_score(y_test, y_test_pred_lr))
print('The r2 score of Linear_Regression on train is ', r2_score(y_train, y_train_pred_lr))


# ## L2正则-->岭回归

# In[120]:


from sklearn.linear_model import RidgeCV

# Candidate regularization strengths, searched via built-in cross-validation.
# alphas = [0.01, 0.1, 1, 10, 100]
alphas = [0.0186, 0.0187, 0.0188, 0.0189]

# Build the RidgeCV estimator; store_cv_values keeps the per-alpha CV
# scores around for inspection after fitting.
ridge = RidgeCV(alphas=alphas, store_cv_values=True)

# Fit on the training split.
ridge.fit(X_train, y_train)

# Predictions for both splits.
y_train_pred_ridge = ridge.predict(X_train)
y_test_pred_ridge = ridge.predict(X_test)


# In[121]:


ridge.alpha_  # alpha selected by cross-validation


# In[122]:


# Report R^2 for ridge on both splits.
# FIX: corrected the "iof" typo and the model name in the messages
# (these results are for ridge regression, not plain linear regression).
print('The r2 score of Ridge_Regression on test is ', r2_score(y_test, y_test_pred_ridge))
print('The r2 score of Ridge_Regression on train is ', r2_score(y_train, y_train_pred_ridge))


# In[123]:


# 看看各特征的权重系数，系数的绝对值大小可视为该特征的重要性
# Ridge feature weights; |coefficient| is a rough proxy for feature importance.
# FIX: ridge.coef_ has shape (1, n_features) because y is a column vector;
# ravel() flattens it so cells hold scalars and sort_values orders correctly.
fs = pd.DataFrame({"columns": list(columns), "coef": ridge.coef_.ravel()})
fs.sort_values(by=['coef'], ascending=False)


# ## L1正则--> Lasso

# In[104]:


from sklearn.linear_model import LassoCV

# Candidate alphas for Lasso, searched via cross-validation.
# alphas = [0.01, 0.1, 1, 10, 100]
alphas = [0.000005991, 0.000005995, 0.000005997, 0.000005998, 0.000005999]


lasso = LassoCV(alphas=alphas)

# Fit on the training split.
# FIX: LassoCV expects a 1-D target; y_train is a (n, 1) column vector from
# the scaler, so ravel() avoids a DataConversionWarning (sklearn would
# flatten it internally anyway — behavior is unchanged).
lasso.fit(X_train, y_train.ravel())

# Predictions for both splits.
y_test_pred_lasso = lasso.predict(X_test)
y_train_pred_lasso = lasso.predict(X_train)


# In[105]:


lasso.alpha_  # alpha selected by cross-validation


# In[106]:


# Report R^2 for Lasso on both splits.
# FIX: corrected the "iof" typo and the model name in the messages
# (these results are for Lasso, not plain linear regression).
print('The r2 score of Lasso_Regression on test is ', r2_score(y_test, y_test_pred_lasso))
print('The r2 score of Lasso_Regression on train is ', r2_score(y_train, y_train_pred_lasso))


# In[125]:


# Side-by-side comparison of the three models' feature weights;
# |coefficient| is a rough proxy for feature importance.
# BUG FIX: the original built the "lr_coef" column from lasso.coef_,
# duplicating the Lasso weights — it must come from lr.coef_.
# ravel() flattens the (1, n_features) coef_ arrays of lr and ridge
# so each cell is a scalar (lasso.coef_ is already 1-D).
fs = pd.DataFrame({
    "columns": list(columns),
    "lr_coef": lr.coef_.ravel(),
    "ridge_coef": ridge.coef_.ravel(),
    "lasso_coef": lasso.coef_.ravel(),
})
fs.sort_values(by=['lr_coef'], ascending=False)


# ## 题目5：不懂

# In[ ]:




