# __author__ = 'heyin'
# __date__ = '2018/11/21 15:13'
from pyecharts import Line
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LinearRegression, SGDRegressor, Ridge
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV


def bubble_sort(nums, nums1):
    """Sort ``nums`` ascending with bubble sort, permuting ``nums1`` in lockstep.

    Both sequences are mutated in place; every swap applied to ``nums`` is
    mirrored on ``nums1``, so afterwards ``nums1`` is reordered by the sort
    order of ``nums`` (used to keep true/predicted value pairs aligned).

    :param nums: mutable sequence of mutually comparable items (sort key).
    :param nums1: parallel mutable sequence of at least the same length.
    :return: the (mutated) pair ``(nums, nums1)``.
    """
    n = len(nums)
    for i in range(n - 1):  # each pass bubbles the next-largest item to the end
        swapped = False
        for j in range(n - i - 1):
            if nums[j] > nums[j + 1]:
                nums[j], nums[j + 1] = nums[j + 1], nums[j]
                nums1[j], nums1[j + 1] = nums1[j + 1], nums1[j]
                swapped = True
        if not swapped:  # no swap in a full pass: already sorted, stop early
            break
    return nums, nums1


def l():
    """Ordinary least squares (normal-equation solver) on the Boston dataset.

    Fits on a 75/25 train/test split and prints the mean squared error on
    both splits.

    NOTE(review): ``load_boston`` itself was removed in scikit-learn 1.2;
    this module needs sklearn < 1.2 to run at all — confirm the pinned
    version before relying on it.
    """
    b = load_boston()
    x = b.data
    y = b.target
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=1)

    # Fix: the ``normalize`` constructor argument was deprecated in sklearn
    # 1.0 and removed in 1.2 (it raised TypeError).  For plain OLS the
    # predictions are identical with or without feature scaling — see the
    # comment in ``__main__`` — so it is simply dropped.
    lr = LinearRegression()
    # ret = cross_val_score(lr, x_train, y_train, cv=5)
    # print(ret)
    # No hyper-parameters to tune here, so cross-validation adds nothing.

    lr.fit(x_train, y_train)
    y_pred = lr.predict(x_test)
    y_pred2 = lr.predict(x_train)

    # MSE alone gives little intuition about prediction quality; printed as
    # a reference number only.
    print('测试集均方误差为：', mean_squared_error(y_test, y_pred))
    print('训练集均方误差为：', mean_squared_error(y_train, y_pred2))

    # Line-chart visualisation (pyecharts), kept commented for reference:
    # x_axis = list(range(1, y_test.shape[0] + 1))
    # line = Line("线性回归测试集预测结果可视化", width=1200)
    # y_test_s, y_pred_s = bubble_sort(y_test, y_pred)
    # line.add("真实值", x_axis, y_test_s, is_smooth=True)
    # line.add("预测值", x_axis, y_pred_s, is_smooth=True)
    # line.render(path='./echart_html/线性回归测试集预测结果可视化.html')

    # x_axis = list(range(1, y_train.shape[0] + 1))
    # line = Line("线性回归训练集预测结果可视化", width=1200)
    # y_test_s, y_pred_s = bubble_sort(y_train, y_pred2)
    # line.add("真实值", x_axis, y_test_s, is_smooth=True)
    # line.add("预测值", x_axis, y_pred_s, is_smooth=True)
    # line.render(path='./echart_html/线性回归训练集预测结果可视化.html')


def l2():
    """OLS on the Boston dataset with manual standardisation of X and y.

    Demonstrates that scaling does not change LinearRegression's predictions:
    after inverse-transforming, the printed predictions match the unscaled
    fit in ``l`` (per the comment in ``__main__``).
    """
    b = load_boston()
    x = b.data
    y = b.target
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=1)

    # Manual standardisation.  Each scaler is fit on the training split only
    # and then reused on the test split to avoid leakage.
    # (Fix: removed leftover debug prints of x_train / type(y_test).)
    xs = StandardScaler()
    x_train = xs.fit_transform(x_train)
    x_test = xs.transform(x_test)
    ys = StandardScaler()
    # StandardScaler expects 2-D input, hence the reshape to a column vector.
    y_train = ys.fit_transform(y_train.reshape((-1, 1)))
    y_test = ys.transform(y_test.reshape((-1, 1)))

    lr = LinearRegression()
    lr.fit(x_train, y_train)
    y_pred = lr.predict(x_test)
    # Map predictions back to the original (unscaled) target units.
    print(ys.inverse_transform(y_pred).flatten())


def sgd():
    """Gradient-descent linear regression (SGDRegressor) on the Boston dataset.

    Gradient descent suits large datasets (roughly >100k samples) and is
    faster there; the normal-equation solver (see ``l``) suits small ones.
    Features and target are standardised first, since SGD is scale-sensitive.
    Prints the test-set MSE in the original target units.
    """
    b = load_boston()
    x = b.data
    y = b.target
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=1)

    # Manual standardisation: fit on the training split, reuse on the test split.
    xs = StandardScaler()
    x_train = xs.fit_transform(x_train)
    x_test = xs.transform(x_test)
    ys = StandardScaler()
    y_train = ys.fit_transform(y_train.reshape((-1, 1)))
    y_test = ys.transform(y_test.reshape((-1, 1)))

    lr = SGDRegressor()
    # SGDRegressor wants a 1-D target, hence flatten().
    lr.fit(x_train, y_train.flatten())
    # Fix: predict() returns a 1-D array, but StandardScaler.inverse_transform
    # requires 2-D input — reshape to a column vector before inverting.
    y_pred = lr.predict(x_test).reshape((-1, 1))

    # MSE alone gives little intuition about prediction quality; printed as
    # a reference number only.
    print('测试集均方误差为：', mean_squared_error(ys.inverse_transform(y_test), ys.inverse_transform(y_pred)))


def ridge():
    """Ridge (L2-regularised) regression on the Boston dataset.

    Prints the train/test mean squared error.
    """
    b = load_boston()
    x = b.data
    y = b.target
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=1)

    # Fix: ``normalize=True`` was deprecated in sklearn 1.0 and removed in
    # 1.2 (it raised TypeError).  Scale the features explicitly instead,
    # consistent with l2()/sgd().
    # NOTE(review): ``normalize`` used per-feature l2-norm scaling while
    # StandardScaler uses mean/std — under regularisation the fitted model
    # can differ slightly; re-tune ``alpha`` if exact parity matters.
    xs = StandardScaler()
    x_train = xs.fit_transform(x_train)
    x_test = xs.transform(x_test)

    lr = Ridge(alpha=0.01)

    lr.fit(x_train, y_train)
    y_pred = lr.predict(x_test)
    y_pred2 = lr.predict(x_train)

    # MSE alone gives little intuition about prediction quality; printed as
    # a reference number only.
    print('测试集均方误差为：', mean_squared_error(y_test, y_pred))
    print('训练集均方误差为：', mean_squared_error(y_train, y_pred2))

if __name__ == '__main__':
    # Both approaches (l with raw features, l2 with manual standardisation)
    # give identical predictions, so plain linear regression does not need a
    # separate standardisation step — the model handles scaling itself.
    # l()
    # print('*' * 50)
    l2()
    # sgd()
    # ridge()