"""
1.获取数据
2.数据基本处理
    2.1.分割数据
3.特征工程：标准化
4.机器学习：线性回归
5.模型评估
"""
import joblib
import numpy as np
import pandas as pd

# from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, SGDRegressor, Ridge, RidgeCV, Lasso, ElasticNet, ElasticNetCV
from sklearn.metrics import mean_squared_error


def linerModel1():
    """
    Linear regression via the normal equation (closed-form solution).

    Fetches the Boston housing dataset, splits it, standardizes the
    features, fits a LinearRegression model, and prints the predictions,
    intercept, coefficients, and mean squared error.
    :return: None
    """
    # 1. Fetch the data. The Boston dataset was removed from sklearn, so it
    #    is rebuilt from the original CMU source file.
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)
    # Each sample spans two physical rows: the first 11 features on the even
    # row, 2 more features plus the target (house price) on the odd row.
    data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    # print(data)
    target = raw_df.values[1::2, 2]
    # 2. Basic data handling: train/test split
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.2)

    # 3. Feature engineering: standardization.
    # BUG FIX: the test set must be scaled with the statistics learned from
    # the training set (transform), not re-fitted with fit_transform —
    # otherwise the evaluation is distorted by data leakage.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. Machine learning: linear regression (normal equation)
    estimator = LinearRegression()
    estimator.fit(x_train, y_train)

    # 5. Model evaluation
    # 5.1 predictions
    y_pre = estimator.predict(x_test)
    print("预测值是：\n", y_pre)
    print("这个模型的偏置是：\n", estimator.intercept_)
    print("这个模型的系数是：\n", estimator.coef_)

    # 5.2 mean squared error (lower is better)
    err = mean_squared_error(y_test, y_pre)
    print("均方误差（越小越好）:\t", err)

    return None


def linerModel2():
    """
    Linear regression via stochastic gradient descent (SGDRegressor).

    Fetches the Boston housing dataset, splits it, standardizes the
    features, fits an SGDRegressor, and prints the predictions,
    intercept, coefficients, and mean squared error.
    :return: None
    """
    # 1. Fetch the data (rebuilt from the original CMU source file)
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)
    # Each sample spans two physical rows: features on the even row,
    # remaining features plus the target on the odd row.
    data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    # print(data)
    target = raw_df.values[1::2, 2]
    # 2. Basic data handling: train/test split
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.01)

    # 3. Feature engineering: standardization.
    # BUG FIX: use transform (not fit_transform) on the test set so it is
    # scaled with the training-set statistics — avoids data leakage.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. Machine learning: gradient descent.
    # Omitting max_iter triggers a warning; a constant learning_rate with a
    # large eta0 makes the error much worse, so they are generally not used:
    # estimator = SGDRegressor(max_iter=1000, learning_rate="constant", eta0=0.1)
    estimator = SGDRegressor(max_iter=1000)
    estimator.fit(x_train, y_train)

    # 5. Model evaluation
    # 5.1 predictions
    y_pre = estimator.predict(x_test)
    print("预测值是：\n", y_pre)
    print("这个模型的偏置是：\n", estimator.intercept_)
    print("这个模型的系数是：\n", estimator.coef_)

    # 5.2 mean squared error (lower is better)
    err = mean_squared_error(y_test, y_pre)
    print("均方误差（越小越好）:\t", err)

    return None


def linerModel3():
    """
    Linear regression via ridge regression (L2 regularization).

    Fetches the Boston housing dataset, splits it, standardizes the
    features, fits a RidgeCV model (alpha chosen by cross-validation),
    and prints the predictions, intercept, coefficients, and MSE.
    :return: None
    """
    # 1. Fetch the data (rebuilt from the original CMU source file)
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)
    # Each sample spans two physical rows: features on the even row,
    # remaining features plus the target on the odd row.
    data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    # print(data)
    target = raw_df.values[1::2, 2]
    # 2. Basic data handling: train/test split
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.01)

    # 3. Feature engineering: standardization.
    # BUG FIX: use transform (not fit_transform) on the test set so it is
    # scaled with the training-set statistics — avoids data leakage.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. Machine learning: ridge regression.
    # Cross-validation over alphas=(0.1, 1, 10) lets the model pick the best
    # regularization strength automatically.
    estimator = RidgeCV(alphas=(0.1, 1, 10))
    # alpha (regularization strength): smaller alpha -> weaker penalty on the
    # weights, and vice versa. Typical range [0, 1] and (1, 10]:
    # estimator = Ridge(alpha=1)
    estimator.fit(x_train, y_train)

    # 5. Model evaluation
    # 5.1 predictions
    y_pre = estimator.predict(x_test)
    print("预测值是：\n", y_pre)
    print("这个模型的偏置是：\n", estimator.intercept_)
    print("这个模型的系数是：\n", estimator.coef_)

    # 5.2 mean squared error (lower is better)
    err = mean_squared_error(y_test, y_pre)
    print("均方误差（越小越好）:\t", err)

    return None


def linerModel4():
    """
    Linear regression via Lasso regression (L1 regularization).

    Fetches the Boston housing dataset, splits it, standardizes the
    features, fits a Lasso model, and prints the predictions, intercept,
    coefficients, and mean squared error.
    :return: None
    """
    # 1. Fetch the data (rebuilt from the original CMU source file)
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)
    # Each sample spans two physical rows: features on the even row,
    # remaining features plus the target on the odd row.
    data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    # print(data)
    target = raw_df.values[1::2, 2]
    # 2. Basic data handling: train/test split
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.01)

    # 3. Feature engineering: standardization.
    # BUG FIX: use transform (not fit_transform) on the test set so it is
    # scaled with the training-set statistics — avoids data leakage.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. Machine learning: Lasso regression
    estimator = Lasso(alpha=0.1)
    estimator.fit(x_train, y_train)

    # 5. Model evaluation
    # 5.1 predictions
    y_pre = estimator.predict(x_test)
    print("预测值是：\n", y_pre)
    print("这个模型的偏置是：\n", estimator.intercept_)
    print("这个模型的系数是：\n", estimator.coef_)

    # 5.2 mean squared error (lower is better)
    err = mean_squared_error(y_test, y_pre)
    print("均方误差（越小越好）:\t", err)

    return None


def linerModel5():
    """
    Linear regression via ElasticNet (combined L1/L2 regularization).

    Fetches the Boston housing dataset, splits it, standardizes the
    features, fits an ElasticNetCV model (alpha chosen by
    cross-validation), and prints the predictions, intercept,
    coefficients, and mean squared error.
    :return: None
    """
    # 1. Fetch the data (rebuilt from the original CMU source file)
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)
    # Each sample spans two physical rows: features on the even row,
    # remaining features plus the target on the odd row.
    data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    # print(data)
    target = raw_df.values[1::2, 2]
    # 2. Basic data handling: train/test split
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.01)

    # 3. Feature engineering: standardization.
    # BUG FIX: use transform (not fit_transform) on the test set so it is
    # scaled with the training-set statistics — avoids data leakage.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)

    # 4. Machine learning: ElasticNet (the original comment said "Lasso",
    #    but the estimator actually used is ElasticNetCV).
    # estimator = ElasticNet(alpha=0.1)
    estimator = ElasticNetCV(alphas=(0.001, 0.1, 1, 5, 10))
    estimator.fit(x_train, y_train)

    # 5. Model evaluation
    # 5.1 predictions
    y_pre = estimator.predict(x_test)
    print("预测值是：\n", y_pre)
    print("这个模型的偏置是：\n", estimator.intercept_)
    print("这个模型的系数是：\n", estimator.coef_)

    # 5.2 mean squared error (lower is better)
    err = mean_squared_error(y_test, y_pre)
    print("均方误差（越小越好）:\t", err)

    return None


def saveModel():
    """
    Train an ElasticNetCV model on the Boston housing dataset, persist it
    to disk with joblib, and print its evaluation metrics.

    Side effect: writes the fitted model to "d://data//test.pkl".
    :return: None
    """
    # 1. Fetch the data (rebuilt from the original CMU source file)
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)
    # Each sample spans two physical rows: features on the even row,
    # remaining features plus the target on the odd row.
    data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    target = raw_df.values[1::2, 2]
    # 2. Basic data handling: train/test split
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.01)
    # 3. Feature engineering: standardization.
    # BUG FIX: use transform (not fit_transform) on the test set so it is
    # scaled with the training-set statistics — avoids data leakage.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4. Machine learning
    estimator = ElasticNetCV(alphas=(0.001, 0.1, 1, 5, 10))
    estimator.fit(x_train, y_train)
    # 4.1 Persist the fitted model once training is done.
    # NOTE(review): only the estimator is saved, not the fitted scaler —
    # loadModel has to re-fit a scaler on different data, which will not
    # reproduce the same preprocessing.
    joblib.dump(estimator, "d://data//test.pkl")
    # 5. Model evaluation
    # 5.1 predictions
    y_pre = estimator.predict(x_test)
    print("预测值是：\n", y_pre)
    print("这个模型的偏置是：\n", estimator.intercept_)
    print("这个模型的系数是：\n", estimator.coef_)
    # 5.2 mean squared error (lower is better)
    err = mean_squared_error(y_test, y_pre)
    print("均方误差（越小越好）:\t", err)
    return None


def loadModel():
    """
    Load the persisted model from "d://data//test.pkl" and evaluate it on
    a fresh split of the Boston housing dataset.

    NOTE(review): the scaler is re-fitted here on data different from what
    the saved model was trained with; for a faithful evaluation the fitted
    scaler should be persisted alongside the model (e.g. via a Pipeline).
    :return: None
    """
    # 1. Fetch the data (rebuilt from the original CMU source file)
    data_url = "http://lib.stat.cmu.edu/datasets/boston"
    raw_df = pd.read_csv(data_url, sep="\\s+", skiprows=22, header=None)
    # Each sample spans two physical rows: features on the even row,
    # remaining features plus the target on the odd row.
    data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])
    target = raw_df.values[1::2, 2]
    # 2. Basic data handling: train/test split
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.01)
    # 3. Feature engineering: standardization.
    # BUG FIX: use transform (not fit_transform) on the test set so it is
    # scaled with the training-set statistics — avoids data leakage.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4. Machine learning: read the persisted model instead of training
    estimator = joblib.load("d://data//test.pkl")
    # 5. Model evaluation
    # 5.1 predictions
    y_pre = estimator.predict(x_test)
    print("预测值是：\n", y_pre)
    print("这个模型的偏置是：\n", estimator.intercept_)
    print("这个模型的系数是：\n", estimator.coef_)
    # 5.2 mean squared error (lower is better)
    err = mean_squared_error(y_test, y_pre)
    print("均方误差（越小越好）:\t", err)
    return None


# Guard the demo call so importing this module does not trigger a network
# fetch and model training as a side effect.
if __name__ == "__main__":
    # linerModel1()
    # linerModel2()
    # linerModel3()
    linerModel4()
    # linerModel5()