import numpy as np
import pandas as pd

from sklearn.metrics import r2_score

import matplotlib.pyplot as plt
import seaborn as sns

# Load the feature-engineered dataset.
# BUG FIX: use a raw string — "\A" and "\F" are invalid escape sequences in a
# normal string literal (DeprecationWarning, and fragile if the path changes).
df = pd.read_csv(r"D:\AITest\pro_python\FE_day.csv")
# print(df.head())
# Separate the target y ('cnt') from the input feature matrix X.
y = df['cnt']
X = df.drop(['cnt'], axis=1)
# Feature names, used later to label the weight coefficients.
feat_names = X.columns
# Split the data into training and test sets.
from sklearn.model_selection import train_test_split
# Hold out 20% of the samples as the test set; random_state fixes the shuffle
# so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33, test_size=0.2)
# print(X_train.shape)
# print(X_train.shape)
# ---------- Linear regression (ordinary least squares) ----------
# class sklearn.linear_model.LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=1)
from sklearn.linear_model import LinearRegression
# 1. Instantiate the estimator with default settings.
lr = LinearRegression()
# 2. Fit the model on the training data.
lr.fit(X_train, y_train)
# 3. Predict on both the test and the training set.
y_test_pred_lr = lr.predict(X_test)
y_train_pred_lr = lr.predict(X_train)
# Inspect the learned coefficients; |coef| is a rough proxy for feature importance.
fs = pd.DataFrame({"columns": list(feat_names), "coef": list(lr.coef_.T)})
# BUG FIX: sort_values returns a new DataFrame — the result must be assigned,
# otherwise the sort is a silent no-op.
fs = fs.sort_values(by=['coef'], ascending=False)
# Evaluate with r2_score on the test and training sets.
print('The r2 score of LinearRegression on test is', r2_score(y_test, y_test_pred_lr))
# BUG FIX: this line reports the TRAIN score — the label wrongly said "test".
print('The r2 score of LinearRegression on train is', r2_score(y_train, y_train_pred_lr))
# Check the residual distribution on the training set: under the model's
# assumptions the residuals should look like zero-mean Gaussian noise.
f, ax = plt.subplots(figsize=(3, 5))
f.tight_layout()
ax.hist(y_train - y_train_pred_lr, bins=40, label='Residuals Linear', color='g', alpha=.5)
ax.set_title("Histogram of Residuals")
# BUG FIX: legend is a method call, not an assignment; the broken
# "ax.legend=(loc='best')" was dead-commented even though hist() was given a label.
ax.legend(loc='best')
# Scatter plot of predicted vs. true values.
plt.figure(figsize=(4, 3))
plt.scatter(y_train, y_train_pred_lr)
plt.plot([-3, 3], [-3, 3], '--k')  # data is standardized, so +/- 3 std covers the range
plt.axis('tight')
plt.xlabel('True counts')
plt.ylabel('Predicted counts')
plt.tight_layout()


# ---------- Ridge regression (L2 regularization) with built-in CV ----------
# class sklearn.linear_model.RidgeCV(alphas=(0.1, 1.0, 10.0), fit_intercept=True,
#     normalize=False, scoring=None, cv=None, gcv_mode=None, store_cv_values=False)
from sklearn.linear_model import RidgeCV
# 1. Candidate values for the regularization strength alpha.
alphas = [0.01, 0.1, 1, 100]
# n_alphas = 20
# alphas = np.logspace(-5, 2, n_alphas)
# 2. Build a RidgeCV instance; store_cv_values keeps the per-sample leave-one-out
#    CV errors so they can be plotted below.
#    NOTE(review): store_cv_values was renamed store_cv_results in newer
#    scikit-learn releases — confirm against the installed version.
ridge = RidgeCV(alphas=alphas, store_cv_values=True)
# 3. Fit.
ridge.fit(X_train, y_train)
# 4. Predict on both the test and the training set.
y_test_pred_ridge = ridge.predict(X_test)
y_train_pred_ridge = ridge.predict(X_train)
# Evaluate with r2_score on the test and training sets.
print('The r2 score of RidgeCV on test is', r2_score(y_test, y_test_pred_ridge))
print('The r2 score of RidgeCV on train is', r2_score(y_train, y_train_pred_ridge))
# Plot mean CV error versus log10(alpha).
# cv_values_ has shape (n_samples, n_alphas); averaging over axis 0 already
# yields a 1-D array, so no reshape is needed before plotting.
mse_mean = np.mean(ridge.cv_values_, axis=0)
plt.plot(np.log10(alphas), mse_mean)
# Optionally mark the best alpha on the plot (not required):
# plt.plot(np.log10(ridge.alpha_) * np.ones(3), [0.28, 0.29, 0.30])
plt.xlabel('log(alpha)')
plt.ylabel('mse')
plt.show()
print('alpha is:', ridge.alpha_)
# Compare OLS and ridge coefficients side by side.
fs = pd.DataFrame({"columns": list(feat_names), "coef_lr": list(lr.coef_.T), "coef_ridge": list(ridge.coef_.T)})
# BUG FIX: sort_values returns a new DataFrame — the result must be assigned,
# otherwise the sort is a silent no-op.
fs = fs.sort_values(by=['coef_lr'], ascending=False)

# ---------- Lasso regression (L1 regularization) with built-in CV ----------
from sklearn.linear_model import LassoCV

# 1. Candidate values for the regularization strength alpha.
alphas = [0.01, 0.1, 1, 10, 100]

# 2. Build a LassoCV instance (the original comment wrongly said "RidgeCV").
lasso = LassoCV(alphas=alphas)
# lasso = LassoCV()

# 3. Fit.
lasso.fit(X_train, y_train)

# 4. Predict on both the test and the training set.
y_test_pred_lasso = lasso.predict(X_test)
y_train_pred_lasso = lasso.predict(X_train)

# 5. Evaluate with r2_score (imported once at the top of the file; the
#    duplicate mid-file import was removed).
print("The r2 score of LassoCV on test is", r2_score(y_test, y_test_pred_lasso))
print("The r2 score of LassoCV on train is", r2_score(y_train, y_train_pred_lasso))

















