# -*- coding: utf-8 -*-

import numpy as np
import pandas as pd

from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

import matplotlib.pyplot as plt
import seaborn as sns


# %matplotlib inline  (Jupyter magic; kept commented so the file runs as a plain script)

# Path where the data lies, relative to the working directory.
#dpath = './data/'
# Load the UCI Bike Sharing "day" dataset (one row per day).
data = pd.read_csv("../Bike-Sharing-Dataset/day.csv")
# print(data.head())

# Correlation screening: pairs with |Pearson r| >= 0.5 are treated as
# strongly correlated.
# numeric_only=True: corr() raises on non-numeric columns (e.g. 'dteday')
# in pandas >= 2.0.
data_corr = data.corr(numeric_only=True).abs()
threshold = 0.5

# Index the correlation matrix's own columns. The original used
# data.columns, but corr() silently drops non-numeric columns, so the
# indices i/j no longer line up with the full column list and every pair
# after 'dteday' was mislabeled.
cols = data_corr.columns
size = data_corr.shape[0]

# Collect highly correlated pairs from the upper triangle (diagonal excluded).
corr_list = []
for i in range(size):
    for j in range(i + 1, size):
        # Values are already absolute, so one threshold test suffices
        # (the original's negative branch was unreachable after .abs());
        # < 1 excludes perfectly correlated pairs.
        if threshold <= data_corr.iloc[i, j] < 1:
            corr_list.append([data_corr.iloc[i, j], i, j])

# Strongest correlations first.
s_corr_list = sorted(corr_list, key=lambda x: -abs(x[0]))

# Print correlations with the correct column names.
print("========= 数据相关性 =============")
for v, i, j in s_corr_list:
    print("%s and %s = %.2f" % (cols[i], cols[j], v))

# Split by year: yr == 0 is the training period, yr == 1 the held-out period.
data_0 = data.loc[data["yr"] == 0]
data_1 = data.query("yr==1")

# Target variable: total daily rental count ('cnt').
y_train_raw = data_0["cnt"].values
y_test_raw = data_1["cnt"].values


# Drop the target plus date/categorical columns — per the original author's
# note they are all derivable from 'instant' (the day index) and add no
# information. NOTE(review): 'registered' and 'casual' presumably sum to the
# target 'cnt' in this dataset and would leak it — confirm against the data
# dictionary.
# One drop(columns=...) call replaces the original chain of eleven .drop()
# calls; the shared list keeps train/test feature sets in sync.
_drop_cols = ['cnt', 'dteday', 'yr', 'season', 'mnth', 'holiday',
              'weekday', 'workingday', 'weathersit', 'registered', 'casual']

X_train_raw = data_0.drop(columns=_drop_cols)
X_test_raw = data_1.drop(columns=_drop_cols)


# season  mnth  holiday  weekday  workingday  weathersit
    # .drop('mnth', axis=1) \
    # .drop('season', axis=1) \
    # .drop('weekday', axis=1) \
#
# X_train_raw['instant'] =data_0['instant'].astype(float)
# X_test_raw['instant'] =data_1['instant'].astype(float)
# X_train_raw['holiday'] =data_0['holiday'].astype(float)
# X_test_raw['holiday'] =data_1['holiday'].astype(float)
# X_train_raw['workingday'] =data_0['workingday'].astype(float)
# X_test_raw['workingday'] =data_1['workingday'].astype(float)
# X_train_raw['weathersit'] =data_0['weathersit'].astype(float)
# X_test_raw['weathersit'] =data_1['weathersit'].astype(float)


# Show one sample row of each raw feature matrix for a quick sanity check.
for _title, _frame in (("========= 原始训练数据 =============", X_train_raw),
                       ("========= 数据验证数据 =============", X_test_raw)):
    print(_title)
    print(_frame.head(1))
# print(y_train[0])

## 原始数据
# X_train = X_train_raw.values
# X_test = X_test_raw.values
# y_train = y_train_raw
# y_test = y_test_raw

# Demo: encode the 'weathersit' category labels as 0-based integers.
from sklearn.preprocessing import LabelEncoder

weather_values = data_0['weathersit'].values
class_le = LabelEncoder()
x_weather = class_le.fit_transform(weather_values)
print(weather_values)
print(x_weather)


# Demo: one-hot encode 'weathersit'.
# Fixes two errors in the original:
#  - `categorical_features` was deprecated and removed from scikit-learn
#    (gone as of 0.22); the encoder now one-hot encodes all input columns.
#  - fit_transform requires a 2-D (n_samples, n_features) array, so the
#    single column must be reshaped from 1-D.
from sklearn.preprocessing import OneHotEncoder

ohe = OneHotEncoder()
x2_weather = ohe.fit_transform(data_0['weathersit'].values.reshape(-1, 1)).toarray()
print(x2_weather)

## Standardize features and target to zero mean / unit variance.
from sklearn.preprocessing import StandardScaler

# Separate scalers: ss_X for the feature matrix, ss_y for the target.
ss_X = StandardScaler()
ss_y = StandardScaler()

# Fit on the training split only, then apply the same transform to the
# test split (no test-set leakage into the scaling statistics).
X_train = ss_X.fit_transform(X_train_raw)
X_test = ss_X.transform(X_test_raw)

# StandardScaler requires 2-D input, hence reshape(-1, 1) on the 1-D targets;
# y_train/y_test therefore have shape (n, 1) from here on.
y_train = ss_y.fit_transform(y_train_raw.reshape(-1, 1))
y_test = ss_y.transform(y_test_raw.reshape(-1, 1))

print("========= 数据标准化 =============")
print(X_train[0])
print(X_test[0])
print(y_train[0])
print(y_test[0])
# print(X_test.head(1))

print("========= 岭回归 =============")
from sklearn.linear_model import RidgeCV

# Candidate regularization strengths (hyper-parameter grid for CV).
alphas = [0.01, 0.1, 1, 10]
#n_alphas = 20
#alphas = np.logspace(-5,2,n_alphas)

# RidgeCV selects the best alpha by (generalized) cross-validation.
# NOTE(review): `store_cv_values` was renamed `store_cv_results` in
# scikit-learn 1.5 and the old name has since been removed — update if
# running on a recent version.
ridge = RidgeCV(alphas=alphas, store_cv_values=True)
# Fit on the standardized training data.
ridge.fit(X_train, y_train)
# Predict on both splits so we can compare train/test fit quality.
y_test_pred_ridge = ridge.predict(X_test)
y_train_pred_ridge = ridge.predict(X_train)

print('alpha is:', ridge.alpha_)
# Evaluate with R^2 on both splits; a large train/test gap suggests
# overfitting. (The original used Python 2 `print` statements here, which
# are a SyntaxError under Python 3 — the rest of the file uses print().)
print('The r2 score of RidgeCV on test is', r2_score(y_test, y_test_pred_ridge))
print('The r2 score of RidgeCV on train is', r2_score(y_train, y_train_pred_ridge))

print("========= 拉锁回归 =============")
from sklearn.linear_model import LassoCV

# Hyper-parameter search grid for the L1 penalty.
alphas = [0.01, 0.1, 1, 10]

# LassoCV performs cross-validation internally over `alphas`.
lasso = LassoCV(alphas=alphas)

# LassoCV expects a 1-D target; y_train has shape (n, 1) after
# StandardScaler, so ravel() avoids a DataConversionWarning.
lasso.fit(X_train, y_train.ravel())

# Predict on both splits to compare fit quality.
y_test_pred_lasso = lasso.predict(X_test)
y_train_pred_lasso = lasso.predict(X_train)

print('alpha is:', lasso.alpha_)
# Evaluate with R^2 on both splits. (The original used Python 2 `print`
# statements here, a SyntaxError under Python 3.)
print('The r2 score of LassoCV on test is', r2_score(y_test, y_test_pred_lasso))
print('The r2 score of LassoCV on train is', r2_score(y_train, y_train_pred_lasso))