from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
import numpy as np
import matplotlib.pyplot as plt

# NOTE(review): load_boston was removed in scikit-learn 1.2; this script
# requires scikit-learn < 1.2 (or port to fetch_california_housing).
x_raw, y_raw = load_boston(return_X_y=True)

# Split BEFORE scaling: fitting the scaler on the full dataset leaks the
# test set's mean/std into the training-time preprocessing.
x_train_raw, x_test_raw, y_train_raw, y_test_raw = train_test_split(
    x_raw, y_raw, train_size=0.7, random_state=666)

# Fit both scalers on the training split only.
x_scaler = StandardScaler().fit(x_train_raw)
y_scaler = StandardScaler().fit(y_train_raw.reshape(-1, 1))

x_train = x_scaler.transform(x_train_raw)
x_test = x_scaler.transform(x_test_raw)
y_train = y_scaler.transform(y_train_raw.reshape(-1, 1)).ravel()
y_test = y_scaler.transform(y_test_raw.reshape(-1, 1)).ravel()

# The per-feature plots below scatter the FULL dataset, so keep scaled
# versions of the whole x / y around as well.
x = x_scaler.transform(x_raw)
y = y_scaler.transform(y_raw.reshape(-1, 1)).ravel()

model = LinearRegression()
model.fit(x_train, y_train)
print(f'Training score = {model.score(x_train, y_train)}')
print(f'Testing score = {model.score(x_test, y_test)}')

# Subplot bookkeeping: spr x spc grid; spn is the last used slot
# (incremented before each plt.subplot call).
spr = 2
spc = 2
spn = 0
plt.figure(figsize=[12, 12])


def x_draw_reg(x, y, x_idx, model):
    """Scatter one feature against the target and overlay the fitted line.

    Draws into the next free subplot slot of the global spr x spc grid
    (advancing the global counter ``spn``). The overlaid line uses only
    ``model.intercept_`` plus the single coefficient for feature ``x_idx``,
    ignoring every other feature, so it is only a crude per-feature
    projection of the multivariate fit. (A sounder visualization would be
    PCA to 2-D features with a 3-D plot.)

    :param x: 2-D feature matrix (samples x features).
    :param y: 1-D target vector, same length as ``x``.
    :param x_idx: column index of the feature to plot.
    :param model: fitted LinearRegression exposing intercept_ and coef_.
    """
    global spn
    spn += 1
    plt.subplot(spr, spc, spn)
    plt.ylabel('target')
    x_label = 'X' + str(x_idx)
    plt.xlabel(x_label)
    plt.grid()
    plt.scatter(x[:, x_idx], y, label=x_label)
    # Two endpoints spanning the feature's range are enough for a line.
    x_plt = np.array([np.min(x[:, x_idx]), np.max(x[:, x_idx])])
    theta0 = model.intercept_
    theta_i = model.coef_[x_idx]
    y_plt = theta0 + x_plt * theta_i
    plt.plot(x_plt, y_plt, 'r-')
    # Bug fix: the scatter label was set but no legend was ever rendered
    # for these subplots, so it never appeared.
    plt.legend()


# Per-feature regression views for the first three features.
for feature_idx in (0, 1, 2):
    x_draw_reg(x, y, feature_idx, model)

# Final panel: model predictions vs. the true targets on the test split.
# Plotting the targets against themselves gives the ideal y = x reference.
h_test = model.predict(x_test)
spn += 1
plt.subplot(spr, spc, spn)
plt.scatter(y_test, y_test, label='target values')
plt.scatter(y_test, h_test, label='hypothesis values')
plt.grid()
plt.legend()

plt.show()
