import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn import preprocessing
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

if __name__ == '__main__':
    # Load the Boston housing dataset (13 feature columns + 1 target column 'MEDV').
    path = 'data/boston_housing_data.csv'
    boston_df = pd.read_csv(path)

    # Split columns: first 13 columns are features, the remainder (MEDV) is the target.
    # np.split(arr, (13,), axis=1) cuts along columns at index 13.
    x, y = np.split(boston_df, (13,), axis=1)

    print('样本个数：%d, 特征个数：%d' % x.shape)

    # Correlation matrix of all columns.
    corr = boston_df.corr()
    # Correlation of each feature with the house price (MEDV).
    print(boston_df.corr()['MEDV'])
    print(corr)
    # Check for missing values (expected: none).
    print(boston_df.isnull().sum())
    # Dataset dimensions.
    print("============查看数据的大小===============")
    print(boston_df.shape)
    # First five rows.
    print("============查看数据的前五行===============")
    print(boston_df.head())
    # Summary statistics (mean, min, max, ...) per feature.
    print("============查看数据的描述信息===============")
    print(boston_df.describe())

    # Scatter plot of a feature vs. the target (kept for reference).
    # plt.figure(facecolor='gray')
    # plt.scatter(boston_df['RM'], boston_df['MEDV'], s=30, edgecolor='white')
    # plt.title('RM')
    # plt.xlabel('RM')
    # plt.ylabel('MEDV')
    # plt.show()

    # Feature selection: keep the 12 features most correlated with the target
    # according to the univariate F-test.
    selector = SelectKBest(score_func=f_regression, k=12)
    X_selected = selector.fit_transform(x, y)

    selected_features = list(x.columns[selector.get_support()])
    print("selected_features:{}".format(selected_features))

    # pca = PCA(n_components=5)
    # newArray = pca.fit_transform(x)

    x = boston_df[selected_features]
    x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=1)

    # Min-max scaling. Use SEPARATE scalers for features and target, fit them on
    # the TRAINING split only, and apply the fitted transform to the test split.
    # (Previously a single scaler was re-fit on every call, including on the test
    # data — that leaks test-set statistics and makes the fits inconsistent.)
    x_scaler = preprocessing.MinMaxScaler()
    y_scaler = preprocessing.MinMaxScaler()
    x_train = x_scaler.fit_transform(x_train)
    x_test = x_scaler.transform(x_test)
    # y is a single-column DataFrame, so it is already 2-D as the scaler requires.
    y_train = y_scaler.fit_transform(y_train)
    y_test = y_scaler.transform(y_test)

    # lr = LinearRegression()

    # line_reg = LinearRegression()

    # line_reg = LinearRegression()
    # model = line_reg.fit(x_train, y_train)

    # L1 regularization (Lasso).
    lasso_model = Lasso(alpha=0.00001)
    lasso_model.fit(x_train, y_train)
    mse_lasso = mean_squared_error(y_test, lasso_model.predict(x_test))
    print("mse_lasso=", mse_lasso)

    # L2 regularization (Ridge).
    ridge_model = Ridge(alpha=0.00001)
    ridge_model.fit(x_train, y_train)
    mse_ridge = mean_squared_error(y_test, ridge_model.predict(x_test))
    print("ridge_mse=", mse_ridge)
'''
    for d in [2, 3, 4, 5, 6, 7, 8, 9]:
        poly_feature = PolynomialFeatures(degree=d, interaction_only=False, include_bias=False)
        model = make_pipeline(poly_feature, LinearRegression(normalize=True, fit_intercept=False))
        line_reg = model.fit(x_train, y_train)

        # print(model)

        y_hat = line_reg.predict(x_test)
        mse = np.average((y_hat - np.array(y_test)) ** 2)  # Mean Squared Error
        rmse = np.sqrt(mse)  # Root Mean Squared Error
        print("------------第二组- -----------,degree is ", d)
        print('MSE = ', mse, end=' ')
        print('RMSE = ', rmse)
        print('trainingR^2 = ', line_reg.score(x_train, y_train))
        print('testR^2 = ', line_reg.score(x_test, y_test))
'''
