
# coding: utf-8

# # Capital Bikeshare每天单车共享数量预测
# 要求以2011年的数据训练模型，预测2012年每天的单车共享数量

# ## step 0 数据探索
# mnth:月份( 1 to 12)
# hr:小时 (0 to 23) (只在 hour.csv 有，作业忽略此字段) holiday:是否是节假日
# weekday:星期中的哪天，取值为 0~6 workingday:是否工作日
# 1=工作日 (非周末和节假日)
# 0=周末 
# weathersit:天气  1:晴天，多云 2:雾天，阴天 3:小雪，小雨 4:大雨，大雪，大雾
# temp:气温摄氏度
# atemp:体感温度
# hum:湿度
# windspeed:风速

# In[410]:


import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O

import matplotlib.pyplot as plt
import seaborn as sns
#color = sns.color_palette()

#get_ipython().magic('matplotlib inline')

# Load the daily bike-sharing dataset.
# NOTE(review): hard-coded machine-specific path — consider a relative path or CLI argument.
data = pd.read_csv("/Volumes/TFDisk/download/Bike-Sharing-Dataset/day.csv")
data.tail()


# In[411]:


data.shape


# In[414]:


data.describe()


# In[415]:


# Split the raw data by year: yr == 0 -> 2011, yr == 1 -> 2012.
data2011 = data.loc[data["yr"] == 0]
data2012 = data.loc[data["yr"] == 1]


# In[416]:


# Histogram / KDE of the daily rental count (cnt) in 2011.
# distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot(stat="density", kde=True) reproduces the old density-normalized
# histogram with a KDE overlay.
fig = plt.figure()
sns.histplot(data2011.cnt.values, bins=70, kde=True, stat="density")
plt.xlabel('cnt/day 2011', fontsize=12)
plt.show()

#曲线看到有两个峰，分别对应1800和4500左右（2012年只有一个峰在7000左右）
# In[417]:


# Histogram / KDE of the daily rental count (cnt) in 2012.
# distplot was removed in seaborn 0.14; histplot is the modern equivalent.
fig = plt.figure()
sns.histplot(data2012.cnt.values, bins=70, kde=True, stat="density")
plt.xlabel('cnt/day 2012', fontsize=12)
plt.show()


# In[418]:


# Scatter plot of the 2011 daily counts in time order (x = day index).
n_days = data2011.shape[0]
plt.scatter(np.arange(n_days), data2011["cnt"].values, color='green')
plt.title("Distribution of cnt");


# 个别日子租出去很多，个别日子租出去很少，在1000-2000和4000-5000这两段比较聚集，跟直方图的两个峰的提示一样

# In[419]:


# Bar chart of 2011 weather categories (1=clear/cloudy, 2=mist, 3=light rain/snow).
# Pass the vector as x= explicitly; positional data arguments are deprecated
# in seaborn >= 0.12.
sns.countplot(x=data2011.weathersit);
plt.xlabel('1:sunny 2:cloudy 3:rain');
plt.ylabel('count');


# In[420]:


# Histogram of 2011 temperature converted back to degrees Celsius.
# In the UCI bike-sharing dataset `temp` is normalized by dividing by 41
# (the maximum), so multiply by 41 — not 100 — to recover Celsius.
# Also: distplot was removed in seaborn 0.14; histplot(kde=False) plots counts
# just like the old call did.
fig = plt.figure()
sns.histplot(data2011.temp.values * 41, bins=80, kde=False)
plt.xlabel('°C', fontsize=12)
plt.show()


# In[421]:


# Column names (kept for later reference) and the absolute correlation matrix.
cols = data2011.columns
# numeric_only=True: pandas >= 2.0 raises a TypeError on non-numeric columns
# (dteday is a string column) without it.
data_corr = data2011.corr(numeric_only=True).abs()
data_corr.shape


# In[422]:


# Annotated correlation heatmap; the second pass masks every cell below 1
# so only perfect correlations (the diagonal) remain highlighted.
fig, ax = plt.subplots(figsize=(13, 9))
sns.heatmap(data_corr, annot=True)
sns.heatmap(data_corr, mask=data_corr < 1, cbar=False)
plt.show()


# 温度和体感温度的相关度强相关，月份和季节强相关，湿度和天气相关，温度、体感温度都和cnt有较强相关。workingday居然和cnt不怎么相关
# yr、casual、registered需要降维

# # step 1 数据准备

# In[429]:


import math

# Drop identifiers, leakage columns (casual/registered are components of cnt),
# and raw features that are replaced by engineered versions below.
# Kept: yr (used for the train/test split), workingday, and the target cnt.
# A single drop(columns=[...]) replaces the original chain of 13 drop() calls.
X = data.drop(columns=[
    'dteday', 'instant', 'atemp', 'season', 'mnth', 'holiday', 'hum',
    'casual', 'temp', 'weekday', 'windspeed', 'weathersit', 'registered',
])


n = len(data)

# Engineered features, vectorized with numpy instead of the original
# per-row Python loop (identical values, no behavior change).

# Early-market user growth is roughly proportional to the existing base,
# so cumulative ridership grows roughly quadratically with time.
instant_sqr = data["instant"].values ** 1.9

# ~23 °C (0.23 normalized) is the most comfortable riding temperature;
# ridership falls off on either side, hence distance from 0.23.
atemp_abs = np.abs(data["atemp"].values - 0.23) ** 0.8

# Humidity far from a comfortable level also suppresses ridership.
hum_abs = np.abs(data["hum"].values - 0.2) ** 0.1

# Wind drag (and its effect on comfort) grows superlinearly with wind speed.
windspeed_sqr = data["windspeed"].values ** 4

# Placeholders left over from an abandoned hand-rolled one-hot attempt;
# they stay all-zero and are never inserted into X. pd.get_dummies would be
# the standard way to one-hot encode season/weathersit.
weathersit_e = np.zeros(n)
season_1 = np.zeros(n)
season_2 = np.zeros(n)
season_3 = np.zeros(n)
season_4 = np.zeros(n)
weathersit_1 = np.zeros(n)
weathersit_2 = np.zeros(n)
weathersit_3 = np.zeros(n)
weathersit_4 = np.zeros(n)

# Insert the engineered columns at position 0 (same order as the original).
X.insert(0, 'instant_sqr', instant_sqr)
X.insert(0, 'atemp_abs', atemp_abs)
X.insert(0, 'hum_abs', hum_abs)
X.insert(0, 'windspeed_sqr', windspeed_sqr)



#X.insert(0,'o_season', o_season)
    
# Train on 2011, test on 2012; the yr flag is only needed for the split.
X_2011_train = X[X.yr == 0].drop(columns='yr')
X_2012_test = X[X.yr == 1].drop(columns='yr')

# Pull the target out, then remove it from the feature matrices.
y_2011_train = X_2011_train['cnt'].values
y_2012_test = X_2012_test['cnt'].values
X_2011_train = X_2011_train.drop(columns='cnt')
X_2012_test = X_2012_test.drop(columns='cnt')

# Feature names, kept for mapping model coefficients back to columns later.
columns = X_2011_train.columns

# Standardize features and target; scalers are fitted on the training year
# only and then applied unchanged to the test year.
from sklearn.preprocessing import StandardScaler

ss_X = StandardScaler()
ss_y = StandardScaler()

X_2011_train = ss_X.fit_transform(X_2011_train)
X_2012_test = ss_X.transform(X_2012_test)

# StandardScaler expects 2-D input, hence the column-vector reshape.
y_2011_train = ss_y.fit_transform(y_2011_train.reshape(-1, 1))
y_2012_test = ss_y.transform(y_2012_test.reshape(-1, 1))



# # Step2 岭回归

# In[430]:


# Ridge regression (L2 regularization) with built-in leave-one-out CV
# over the alpha grid.
from sklearn.linear_model import RidgeCV
from sklearn.metrics import r2_score

# Regularization strengths to search.
alphas = [1e-04, 1e-03, 1e-02, 1e-01, 1, 1e+01, 1e+02, 1e+03]

# store_cv_values keeps the per-sample CV errors for the plot below.
# NOTE(review): renamed to store_cv_results in scikit-learn >= 1.5.
ridge = RidgeCV(alphas=alphas, store_cv_values=True)

# Fit on 2011. ravel() passes a 1-D target, avoiding the multi-output code
# path so coef_ comes back as a flat (n_features,) vector.
ridge.fit(X_2011_train, y_2011_train.ravel())

# Predict on both years.
y_test_pred_ridge = ridge.predict(X_2012_test)
y_train_pred_ridge = ridge.predict(X_2011_train)

# r2 on the held-out 2012 data vs. the (optimistic) training fit.
print ('The r2 score of RidgeCV on test is', r2_score(y_2012_test, y_test_pred_ridge))
print ('The r2 score of RidgeCV on train is', r2_score(y_2011_train, y_train_pred_ridge))


# 可视化

# In[431]:


# Mean CV error per alpha (averaged over samples), on a log10(alpha) axis.
mse_mean = np.mean(ridge.cv_values_, axis = 0)
plt.plot(np.log10(alphas), mse_mean.reshape(len(alphas), 1))

plt.xlabel('log(alpha)')
plt.ylabel('mse')
plt.show()

print ('alpha is:', ridge.alpha_)

# Feature weights; |coef| can be read as feature importance.
# ravel() flattens coef_ whether it is (n_features,) or (1, n_features),
# so the DataFrame column holds scalars and sort_values compares cleanly
# (the original list(coef_.T) could store length-1 arrays per cell).
fs = pd.DataFrame({"columns": list(columns), "coef_ridge": list(ridge.coef_.ravel())})
fs.sort_values(by=['coef_ridge'], ascending=False)


# ## Lasso

# In[432]:


# Lasso (L1 regularization) with cross-validated alpha selection.
from sklearn.linear_model import LassoCV

# Hyperparameter (alpha) search grid.
alphas = [1e-06, 1e-05, 1e-04, 1e-03, 1e-02, 1e-01, 1, 1e+1]

lasso = LassoCV(alphas=alphas)

# LassoCV expects a 1-D target; ravel() avoids the DataConversionWarning
# (and internal implicit conversion) triggered by the (n, 1) column vector.
lasso.fit(X_2011_train, y_2011_train.ravel())

# Predict on both years.
y_test_pred_lasso = lasso.predict(X_2012_test)
y_train_pred_lasso = lasso.predict(X_2011_train)

# r2 on the held-out 2012 data vs. the training fit.
print ('The r2 score of LassoCV on test is', r2_score(y_2012_test, y_test_pred_lasso))
print ('The r2 score of LassoCV on train is', r2_score(y_2011_train, y_train_pred_lasso))



# In[433]:


# Mean CV error along the alpha path (averaged over folds).
mses = np.mean(lasso.mse_path_, axis = 1)
plt.plot(np.log10(lasso.alphas_), mses)
plt.xlabel('log(alpha)')
plt.ylabel('mse')
plt.show()

print ('alpha is:', lasso.alpha_)

# Side-by-side feature weights from both models; ravel() keeps each cell a
# scalar even if a model was fitted with a 2-D target (coef_ of shape (1, n)).
fs = pd.DataFrame({
    "columns": list(columns),
    "coef_ridge": list(ridge.coef_.ravel()),
    "coef_lasso": list(lasso.coef_.ravel()),
})
fs.sort_values(by=['coef_lasso'], ascending=False)

