import pandas as pd
import numpy as np

# 模型
from sklearn.linear_model import LinearRegression, RidgeCV, LassoCV, ElasticNetCV

# 模型评估
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score  # 评价回归预测模型的性能

# 可视化
import matplotlib.pyplot as plt

# Load the feature-engineered daily bike-sharing dataset.
data = pd.read_csv("res/FE_day.csv")
# print(data.head())

# Target is the rental count; everything else is a feature.
y = data['cnt']
X = data.drop(['cnt'], axis=1)

from sklearn.model_selection import train_test_split

# Fixed random_state so the split (and all reported metrics) is reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

print("train samples:", X_train.shape)

# Keep the record IDs of the test rows (e.g. for a submission file).
testID = X_test['instant']
print(X_train.keys())

# The ID column must not take part in prediction.
X_train = X_train.drop(['instant'], axis=1)
X_test = X_test.drop(['instant'], axis=1)

# Save the feature names for later use (coefficient visualisation).
feat_names = X_train.columns
print(X_train.keys())
# Linear Regression
# Linear Regression
def Linear_test():
    """Fit an OLS linear regression on the module-level train/test split.

    Prints train/test RMSE and R^2, reports how many coefficients are
    non-zero, and plots the 10 most negative and 10 most positive
    coefficients as a horizontal bar chart.
    """
    lr = LinearRegression()
    lr.fit(X_train, y_train)

    y_train_pred = lr.predict(X_train)
    y_test_pred = lr.predict(X_test)

    # Training error is shown only for reference; test error is the
    # number that matters.
    rmse_train = np.sqrt(mean_squared_error(y_train, y_train_pred))
    rmse_test = np.sqrt(mean_squared_error(y_test, y_test_pred))
    print("RMSE on Training set :", rmse_train)
    print("RMSE on Test set :", rmse_test)
    r2_score_train = r2_score(y_train, y_train_pred)
    r2_score_test = r2_score(y_test, y_test_pred)
    print("r2_score on Training set :", r2_score_train)
    print("r2_score on Test set :", r2_score_test)
    print(lr.coef_.shape)   # (34,)
    """
    RMSE on Training set : 750.239646786
    RMSE on Test set : 783.169153172
    r2_score on Training set : 0.844514307127
    r2_score on Test set : 0.855788073035
    """
    # Plot important coefficients
    coefs = pd.Series(lr.coef_, index=feat_names)
    # BUG FIX: the message previously said "Ridge picked ..." although this
    # function fits plain OLS.
    print("OLS picked " + str(sum(coefs != 0)) + " features and eliminated the other " +
          str(sum(coefs == 0)) + " features")

    # The 10 features with the largest positive coefficients and the 10
    # with the most negative ones (sort once, slice twice).
    ranked = coefs.sort_values()
    imp_coefs = pd.concat([ranked.head(10), ranked.tail(10)])
    imp_coefs.plot(kind="barh")
    plt.title("Coefficients in the OLS Model")
    plt.show()  # consistent with RidgCV_test / Lasso_test, which show their plots


def RidgCV_test():
    """Fit RidgeCV over a small alpha grid on the module-level split.

    Prints the best alpha, the per-alpha CV RMSE, train/test RMSE and R^2;
    plots the top coefficients and the CV MSE as a function of log(alpha).
    """
    alphas = [0.01, 0.1, 1, 10, 100, 1000]  # [i/10 for i in range(1, 100)]
    # store_cv_values keeps per-sample LOO-CV errors so we can plot them.
    # NOTE(review): newer scikit-learn renames this to `store_cv_results` —
    # confirm the installed version still accepts the old name.
    ridge = RidgeCV(alphas=alphas, store_cv_values=True)

    ridge.fit(X_train, y_train)
    alpha = ridge.alpha_  # best alpha selected by cross-validation
    print('best alpha : ', alpha)

    # Mean CV error per alpha (one value per candidate alpha); computed
    # once and reused for the alpha/MSE plot below instead of recomputing.
    mse_cv = np.mean(ridge.cv_values_, axis=0)
    rmse_cv = np.sqrt(mse_cv)
    print('cv of rmse: ', rmse_cv)  # CV estimate of the test error

    # Training error, for reference only — not needed in a real task.
    y_train_pred = ridge.predict(X_train)
    rmse_train = np.sqrt(mean_squared_error(y_train, y_train_pred))

    y_test_pred = ridge.predict(X_test)
    rmse_test = np.sqrt(mean_squared_error(y_test, y_test_pred))

    print("RMSE on Training set :", rmse_train)
    print("RMSE on Test set :", rmse_test)

    r2_score_train = r2_score(y_train, y_train_pred)
    r2_score_test = r2_score(y_test, y_test_pred)
    print("r2_score on Training set :", r2_score_train)
    print("r2_score on Test set :", r2_score_test)
    """
    cv of rmse:  [  803.68354877   800.42806695   799.58231128   831.68362477  1008.30035251
      1341.71206857]
    RMSE on Training set : 755.174044371
    RMSE on Test set : 778.125407136
    r2_score on Training set : 0.842462292688
    r2_score on Test set : 0.857639591682
    """
    coefs = pd.Series(ridge.coef_, index=feat_names)
    print("Ridge picked " + str(sum(coefs != 0)) + " features and eliminated the other " +
          str(sum(coefs == 0)) + " features")

    # The 10 features with the largest positive coefficients and the 10
    # with the most negative ones (sort once, slice twice).
    ranked = coefs.sort_values()
    imp_coefs = pd.concat([ranked.head(10), ranked.tail(10)])
    imp_coefs.plot(kind="barh")
    plt.title("Coefficients in the Ridge Model")
    plt.show()

    # CV MSE versus log(alpha); mse_cv is already 1-D, so the previous
    # reshape(len(alphas), 1) was unnecessary.
    plt.plot(np.log10(alphas), mse_cv)
    plt.xlabel('log(alpha)')
    plt.ylabel('mse')
    plt.show()

# RidgCV_test()

def Lasso_test():
    """Fit LassoCV (alpha chosen along its automatic path) on the split.

    Prints the best alpha, the minimum CV RMSE, the number of selected
    features, train/test RMSE and R^2; plots the top coefficients and the
    CV MSE as a function of log(alpha).
    """
    # NOTE: the original code built an `alphas` list here but never passed
    # it to LassoCV, so the estimator always used its automatic alpha path.
    # The dead variable has been removed; behaviour is unchanged.
    print(X_train.head())
    lasso = LassoCV()

    # 2. Model training
    lasso.fit(X_train, y_train)
    alpha = lasso.alpha_
    print("Best alpha :", alpha)

    # 3. Model performance: CV estimate (mean over folds, per alpha).
    mse_cv = np.mean(lasso.mse_path_, axis=1)
    rmse_cv = np.sqrt(mse_cv)
    print("cv of rmse :", min(rmse_cv))

    # 4. Feature importance
    # Plot important coefficients
    coefs = pd.Series(lasso.coef_, index=feat_names)
    print("Lasso picked " + str(sum(coefs != 0)) + " features and eliminated the other " +
          str(sum(coefs == 0)) + " features")
    # Sort once; take the 10 most negative and 10 most positive coefficients.
    ranked = coefs.sort_values()
    imp_coefs = pd.concat([ranked.head(10), ranked.tail(10)])
    imp_coefs.plot(kind="barh")
    plt.title("Coefficients in the Lasso Model")
    plt.show()

    # 5. CV MSE for each alpha on the regularization path.
    plt.plot(np.log10(lasso.alphas_), mse_cv)
    plt.xlabel('log(alpha)')
    plt.ylabel('mse')
    plt.show()

    # Training error (reference only).
    y_train_pred = lasso.predict(X_train)
    rmse_train = np.sqrt(mean_squared_error(y_train, y_train_pred))
    print("RMSE on Training set :", rmse_train)

    # Test error.
    y_test_pred = lasso.predict(X_test)
    rmse_test = np.sqrt(mean_squared_error(y_test, y_test_pred))
    print("RMSE on Test set :", rmse_test)

    r2_score_train = r2_score(y_train, y_train_pred)
    r2_score_test = r2_score(y_test, y_test_pred)
    print("r2_score on Training set :", r2_score_train)
    print("r2_score on Test set :", r2_score_test)
    # Best alpha : 2.33645316605
    # cv of rmse : 828.555046051
    # Lasso picked 27 features and eliminated the other 6 features
    # RMSE on Training set : 754.265627584
    # RMSE on Test set : 786.623058576
    # r2_score on Training set : 0.842841076424
    # r2_score on Test set : 0.854513271312
# Run the Lasso experiment when the script is executed.
Lasso_test()



# Flush any figure still open from the model function.
plt.show()