import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# from sklearn.linear_model import Lasso, Ridge,LinearRegression,ElasticNet
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.linear_model import LassoCV, RidgeCV, ElasticNetCV, LinearRegression
from sklearn import preprocessing

import warnings

warnings.filterwarnings('ignore')

if __name__ == '__main__':

    # Load the Boston housing dataset. Expected layout: 13 feature columns
    # followed by the target column MEDV (median house value).
    path = 'data/boston_housing_data.csv'
    boston_df = pd.read_csv(path)

    # Column-wise split (axis=1): first 13 columns -> features x,
    # remainder (MEDV) -> target y.
    x, y = np.split(boston_df, (13,), axis=1)

    print('样本个数：%d, 特征个数：%d' % x.shape)

    # Correlation matrix; the MEDV row/column shows how strongly each
    # feature correlates with the house price.
    corr = boston_df.corr()
    print(corr['MEDV'])
    print(corr)

    # Reference correlations with MEDV:
    # ZN 0.360445, CHAS 0.175260, RM 0.695360, DIS 0.249929, B 0.333461
    # Sanity checks: missing values (dataset has none), shape, first rows,
    # and per-feature summary statistics (mean, min, max, quartiles, ...).
    print(boston_df.isnull().sum())
    print(boston_df.shape)
    print(boston_df.head())
    print(boston_df.describe())

    # Feature selection: drop CHAS, keep the other 12 columns.
    # NOTE(review): the pupil-teacher-ratio column in the standard Boston
    # housing data is named 'PTRATIO'; the original code used 'PIRATIO',
    # which would raise a KeyError — confirm against the CSV header.
    feature_cols = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS',
                    'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
    x = boston_df[feature_cols]
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, train_size=0.8, random_state=1)

    # Scale features and target to [0, 1]. Fit the scalers on the TRAINING
    # split only and reuse them to transform the test split — re-fitting on
    # the test data (as the original code did) leaks test-set statistics and
    # puts train and test on inconsistent scales. Separate scalers are used
    # for x and y because they have different column ranges.
    x_scaler = preprocessing.MinMaxScaler()
    y_scaler = preprocessing.MinMaxScaler()
    x_train = x_scaler.fit_transform(x_train)
    x_test = x_scaler.transform(x_test)
    y_train = y_scaler.fit_transform(y_train)  # y is a single-column frame
    y_test = y_scaler.transform(y_test)

    # Four candidate models: plain polynomial regression plus three
    # regularized variants with built-in alpha cross-validation.
    models = [
        Pipeline([
            ('poly', PolynomialFeatures(degree=2)),
            ('linear', LinearRegression(fit_intercept=False))
        ]),
        Pipeline([
            ('poly', PolynomialFeatures(degree=4)),
            ('linear', LassoCV(alphas=np.logspace(-4, 3, 20), fit_intercept=False))
        ]),
        Pipeline([
            ('poly', PolynomialFeatures(degree=10)),
            ('linear', RidgeCV(alphas=np.logspace(-4, 3, 20), fit_intercept=False))
        ]),
        Pipeline([
            ('poly', PolynomialFeatures(degree=4)),
            ('linear',
             ElasticNetCV(alphas=np.logspace(-3, 2, 10), l1_ratio=(0.1, 0.3, 0.5, 0.7, 0.9), fit_intercept=False))
        ]),
    ]

    # Model evaluation: test-split MSE/RMSE plus R^2 on both splits.
    titles = ["线性回归", "Lasso回归", "Ridge回归", "ElasticNet回归"]
    for title, model in zip(titles, models):
        model.fit(x_train, y_train.ravel())
        y_hat = model.predict(x_test)
        # Compute MSE once with the conventional (y_true, y_pred) order;
        # RMSE is derived from it instead of a second symmetric call.
        mse = mean_squared_error(y_test, y_hat)
        print('%s,%s,均方差 MSE=%f, 均方根误差 RMSE=%f, trainingR2=%f ,testR2=%f'
              % ('第二组', title, mse, np.sqrt(mse),
                 model.score(x_train, y_train), model.score(x_test, y_test)))
