import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV, ElasticNetCV
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures

import warnings

warnings.filterwarnings('ignore')

def _inspect_data(boston_df, x):
    """Print basic diagnostics: sample/feature counts, correlations, null counts."""
    print('Number of samples: %d, number of features: %d' % x.shape)
    # Correlation matrix; the 'MEDV' column shows each feature's linear
    # correlation with the house price target.
    corr = boston_df.corr()
    print(corr['MEDV'])
    print(corr)
    # Check for missing values (the dataset is expected to have none).
    print(boston_df.isnull().sum())


def _select_features(x, y, k=12):
    """Return the names of the k features most correlated with y (F-test)."""
    selector = SelectKBest(score_func=f_regression, k=k)
    selector.fit(x, y)
    selected = list(x.columns[selector.get_support()])
    print("selected_features:{}".format(selected))
    return selected


def _build_models():
    """Return (names, pipelines) of the four polynomial regression models."""
    models = [
        Pipeline([
            ('poly', PolynomialFeatures(degree=2)),
            ('linear', LinearRegression(fit_intercept=False)),
        ]),
        Pipeline([
            ('poly', PolynomialFeatures(degree=4)),
            ('linear', LassoCV(alphas=np.logspace(-4, 3, 20), fit_intercept=False)),
        ]),
        Pipeline([
            ('poly', PolynomialFeatures(degree=10)),
            ('linear', RidgeCV(alphas=np.logspace(-4, 3, 20), fit_intercept=False)),
        ]),
        Pipeline([
            ('poly', PolynomialFeatures(degree=4)),
            ('linear',
             ElasticNetCV(alphas=np.logspace(-3, 2, 10),
                          l1_ratio=(0.1, 0.3, 0.5, 0.7, 0.9),
                          fit_intercept=False)),
        ]),
    ]
    names = ['Linear', 'LASSO', 'RIDGE', 'ElasticNetCV']
    return names, models


def main():
    """Load the Boston housing data, scale it, and compare four regressors."""
    path = 'data/boston_housing_data.csv'
    boston_df = pd.read_csv(path)

    # Split columns: first 13 columns are features, the remainder (MEDV)
    # is the target. axis=1 splits column-wise.
    x, y = np.split(boston_df, (13,), axis=1)

    _inspect_data(boston_df, x)

    # Report the F-test feature ranking (informational only; the actual
    # feature subset used below is fixed so results stay reproducible).
    _select_features(x, y)

    # NOTE(review): 'PIRATIO' is unusual — the standard Boston column name is
    # 'PTRATIO'. Kept as-is because it must match this CSV's header; verify.
    x = boston_df[['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD',
                   'TAX', 'PIRATIO', 'B', 'LSTAT']]

    x_train, x_test, y_train, y_test = train_test_split(
        x, y, train_size=0.75, random_state=1)

    # Scale features and target to [0, 1]. BUGFIX: the scalers are fitted on
    # the TRAINING split only and then applied to the test split. The original
    # code re-fitted a single scaler on every array, which scaled the test set
    # with its own min/max (train/test on different scales + data leakage).
    x_scaler = preprocessing.MinMaxScaler()
    y_scaler = preprocessing.MinMaxScaler()
    x_train = x_scaler.fit_transform(x_train)
    x_test = x_scaler.transform(x_test)
    # MinMaxScaler requires 2-D input; ravel afterwards so the CV estimators
    # receive the 1-D target they expect.
    y_train = y_scaler.fit_transform(y_train).ravel()
    y_test = y_scaler.transform(y_test).ravel()

    names, models = _build_models()
    for name, model in zip(names, models):
        model.fit(x_train, y_train)
        y_hat = model.predict(x_test)
        mse = mean_squared_error(y_test, y_hat)
        rmse = np.sqrt(mse)  # Root Mean Squared Error
        print("Model: ", name,
              '| MSE = ', mse,
              '| RMSE = ', rmse,
              '| training R^2 = ', model.score(x_train, y_train),
              '| test R^2 = ', model.score(x_test, y_test))


if __name__ == '__main__':
    main()
