﻿import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso

plt.figure(figsize=(14, 6))
np.random.seed(42)

# Training data: m points with x drawn uniformly from [0, 3) and a noisy
# linear target y ≈ 0.5·x + 1 (Gaussian noise scaled by 1/1.5).
m = 20
X = 3 * np.random.rand(m, 1)                     # m uniform samples in [0, 3)
y = 0.5 * X + np.random.randn(m, 1) / 1.5 + 1    # linear trend + noise
X_new = np.linspace(0, 3, 100).reshape(-1, 1)    # dense grid for prediction curves


def plot_model(model_class, polynomial, alphas, **model_kwargs):
    """Fit one model per alpha and plot its prediction curve over X_new.

    Parameters
    ----------
    model_class : estimator class (e.g. ``Ridge`` or ``Lasso``)
        Instantiated as ``model_class(alpha, **model_kwargs)`` for each alpha.
    polynomial : bool
        If True, wrap the linear model in a degree-10 polynomial-features +
        standard-scaling pipeline before fitting.
    alphas : iterable of float
        Regularization strengths; one curve is drawn per value.
    **model_kwargs
        Extra keyword arguments forwarded to ``model_class``.

    Side effects: fits on the module-level ``X``/``y`` and draws the
    prediction curves, training points, and a legend on the current axes.
    """
    styles = ('b-', 'g--', 'r:')
    for i, alpha in enumerate(alphas):
        # Cycle through line styles by index so more than three alphas still
        # get plotted (the previous zip() silently dropped the extras).
        style = styles[i % len(styles)]
        model = model_class(alpha, **model_kwargs)
        if polynomial:
            model = Pipeline([
                ('poly_features', PolynomialFeatures(degree=10, include_bias=False)),
                ('std', StandardScaler()),
                ('lin_reg', model),
            ])
        model.fit(X, y)
        y_new_regularized = model.predict(X_new)
        lw = 2 if alpha > 0 else 1  # emphasize the regularized fits
        plt.plot(X_new, y_new_regularized, style, linewidth=lw,
                 label='alpha = {}'.format(alpha))
    plt.plot(X, y, 'b.', linewidth=3)
    plt.legend()


# Left panel: Lasso on the raw feature; right panel: Lasso on degree-10
# polynomial features. Same alphas and axis limits in both panels.
for position, use_poly in ((121, False), (122, True)):
    plt.subplot(position)
    plot_model(Lasso, polynomial=use_poly, alphas=(0, 0.1, 1))
    plt.axis([0, 3, 0, 3.5])
plt.show()


'''
Lasso ：增加正则项，用于缓解过拟合问题，类似于岭回归，只不过正则项不一样
alpha 值越大，惩罚力度越大，正则项影响越大，模型越平稳
'''