# Echo the module docstring (the example's narrative description) when the
# script is run; standard pattern in scikit-learn example galleries.
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause


import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

# Fit ordinary least squares and ridge regression on tiny, noise-perturbed
# copies of a two-point training set to visualize each estimator's variance:
# ridge's penalty shrinks the spread of the fitted lines.
X_train = np.c_[.5, 1].T  # training inputs, shape (2, 1)
y_train = [.5, 1]         # training targets
X_test = np.c_[0, 2].T    # evaluation points spanning the plotted x-range

# Fix the RNG so the rendered figure is reproducible (the seed had been
# commented out and replaced by a leftover debug print of a random draw).
np.random.seed(0)

classifiers = dict(ols=linear_model.LinearRegression(),
                   ridge=linear_model.Ridge(alpha=.1))

fig, ax = plt.subplots(1, 2, figsize=(8, 3))
for i, (name, clf) in enumerate(classifiers.items()):
    # Refit on several jittered copies of the training data; the spread of
    # the gray lines shows the estimator's sensitivity to the sample.
    for _ in range(6):
        this_X = .1 * np.random.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)

        ax[i].plot(X_test, clf.predict(X_test), color='gray')
        ax[i].scatter(this_X, y_train, s=3, c='gray', marker='o', zorder=10)

    # Fit on the unperturbed data for the reference (blue) line.
    clf.fit(X_train, y_train)
    ax[i].plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
    ax[i].scatter(X_train, y_train, s=30, c='red', marker='+', zorder=10)

    ax[i].set_title(name)
    ax[i].set_xlim(0, 2)
    ax[i].set_ylim((0, 1.6))
    ax[i].set_xlabel('X')
    ax[i].set_ylabel('y')

# Lay out once after all axes are populated (was redundantly called per loop).
fig.tight_layout()
plt.show()