import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.linear_model import LinearRegression, RidgeCV, Ridge, Lasso
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV


# Feature columns: one-hot encoded categoricals (season, month, weather
# situation, weekday) plus the numeric/binary columns (temperatures,
# humidity, windspeed, holiday/workingday flags, year).
keys_X = ['season_1', 'season_2', 'season_3', 'season_4', 'mnth_1',
          'mnth_2', 'mnth_3', 'mnth_4', 'mnth_5', 'mnth_6', 'mnth_7', 'mnth_8',
          'mnth_9', 'mnth_10', 'mnth_11', 'mnth_12', 'weathersit_1',
          'weathersit_2', 'weathersit_3', 'weekday_0', 'weekday_1', 'weekday_2',
          'weekday_3', 'weekday_4', 'weekday_5', 'weekday_6', 'temp', 'atemp',
          'hum', 'windspeed', 'holiday', 'workingday', 'yr']

# Load the feature-engineered daily dataset and echo its columns.
df = pd.read_csv('res/FE_day.csv')
print(df.keys(), type(df.keys()))

X = df[keys_X]   # feature matrix
y = df['cnt']    # target: total daily rental count

# 3. Randomly split the full dataset: 80% for training, 20% for testing;
#    the evaluation metric is RMSE. (10 pts)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)


# print('X_train-----')
# print(X_train.info(), X_train.head())

def RMSE_score(reg, x_test, y_test):
    """Evaluate a fitted model with root-mean-squared error (RMSE).

    :param reg: fitted estimator exposing ``predict``
    :param x_test: feature matrix of the held-out split
    :param y_test: true target values of the held-out split
    :return: square root of the mean squared prediction error
    """
    predictions = reg.predict(x_test)
    # mean(err^2) == sum(err^2) / len(y_test); sqrt gives the RMSE.
    return np.sqrt(np.mean((predictions - y_test) ** 2))


# \4. 用训练数据训练最小二乘线性回归模型（20分）、岭回归模型、
# Lasso模型，其中岭回归模型（30分）和Lasso模型（30分），
# 注意岭回归模型和Lasso模型的正则超参数调优。

# 评价模型教程https://www.jianshu.com/p/9ee85fdad150


def LinearRegration_test():
    """Fit an ordinary least-squares model on the global train split and
    print R^2 on both splits plus the test-set RMSE."""
    print('-----------最小二乘-----------')
    model = LinearRegression()
    model.fit(X_train, y_train)

    # R^2 on both splits to gauge over-/under-fitting.
    print('score_train=', model.score(X_train, y_train))  # score= 0.846772228569
    print('score_test=', model.score(X_test, y_test))  # score= 0.842320887555

    # RMSE on the held-out split.
    print('rmse_error------:\n', RMSE_score(model, X_test, y_test))  # 795.157016112


# LinearRegration_test()

# diff = [abs(b-a) for a,b in zip(y_test_pred_lr, y_test)]
# print(diff)
# n, bins, patches = plt.hist(diff, bins=256, normed=0,edgecolor='None',facecolor='red')


def Ridge_test():
    """Fit ridge regression with the pre-tuned alpha and print R^2 on both
    splits plus the test-set RMSE."""
    print('-----------岭回归-----------')

    # Step 1 (run once, kept for reference): grid-search alpha over
    # 0.1 .. 9.9 with GridSearchCV; the best value found was alpha=0.9.
    # rg = Ridge()
    # parameters = {"alpha": [i/10 for i in range(1,100)]}
    # clf = GridSearchCV(rg, parameters, refit=True)
    # clf.fit(X_train, y_train)
    #
    # print("keys: ", clf.cv_results_.keys())
    # print("best_estimator_", clf.best_estimator_)
    # print("best_score_", clf.best_score_)        # best_score_ 0.821173519481
    # print("best_index_", clf.best_index_)        # best_index_ 8
    # print("best_params_", clf.best_params_)      # best_params_ {'alpha': 0.9}

    # Step 2: train Ridge with the tuned regularization strength.
    model = Ridge(alpha=0.9)
    model.fit(X_train, y_train)

    print('score_train=', model.score(X_train, y_train))  # score_train= 0.846304923588
    print('score_test=', model.score(X_test, y_test))  # score_test= 0.842773329313

    print('rmse_error------:\n', RMSE_score(model, X_test, y_test))  # 794.015391588

"""
dict_keys(['mean_fit_time', 'std_fit_time', 
'mean_score_time', 'std_score_time', 'param_alpha', 
'params', 'split0_test_score', 'split1_test_score', 
'split2_test_score', 'mean_test_score', 'std_test_score', 
'rank_test_score', 'split0_train_score', 'split1_train_score', 
'split2_train_score', 'mean_train_score', 'std_train_score'])
best_estimator_ Ridge(alpha=0.9, copy_X=True, fit_intercept=True, 
max_iter=None,
   normalize=False, random_state=None, solver='auto', tol=0.001)
"""

# Ridge_test()

def Lasso_test():
    """Fit a Lasso model with the pre-tuned alpha and print R^2 on both
    splits plus the test-set RMSE."""
    print('-----------Lasso-----------')
    # Step 1 (run once, kept for reference): hyperparameter search;
    # the best value found was alpha=3.3.
    # la = Lasso()
    # parameters = {"alpha": [i/10 for i in range(1,100)]}
    # clf = GridSearchCV(la, parameters, refit=True)
    # clf.fit(X_train, y_train)
    # print("best_estimator_", clf.best_estimator_)
    # print("best_score_", clf.best_score_)        # best_score_ 0.819268011178
    # print("best_index_", clf.best_index_)        # best_index_ 32
    # print("best_params_", clf.best_params_)      # best_params_ {'alpha': 3.3}

    # Step 2: train Lasso with the tuned regularization strength.
    model = Lasso(alpha=3.3)
    model.fit(X_train, y_train)

    print('score_train=', model.score(X_train, y_train))  # score_train=0.845411116595
    print('score_test=', model.score(X_test, y_test))  # score_test=0.840949955667
    print('rmse_error------:\n', RMSE_score(model, X_test, y_test))  # 798.606258282


Lasso_test()  # only the Lasso experiment runs by default; the other two are commented out above



plt.show()  # NOTE(review): no figure is built above unless the histogram code is re-enabled



# 5. Compare the feature coefficients obtained by the three models above,
#    as well as each model's performance on the test set, and briefly
#    explain the reasons. (10 pts)

"""
最终得到的结果：
模型              训练得分                测试得分        RMSE_开方均方误差
linearr     0.846772228569          0.842320887555      795.157016112
ridge       0.846304923588          0.842773329313      794.015391588
lasso       0.845411116595          0.840949955667      798.606258282

最终的训练得分和测试得分差别不大，RMSE误差也差不太多，不过感觉这个误差都比较大啊。不太明白
"""