import numpy as np
import pandas as pd

from matplotlib import pyplot as plt
from sklearn.datasets import load_diabetes
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Load the diabetes dataset: 10 standardized baseline features and a
# continuous target (disease progression one year after baseline).
diabetes = load_diabetes()
data = diabetes['data']
target = diabetes['target']
feature_names = diabetes['feature_names']
df = pd.DataFrame(data, columns=feature_names)

# 80/20 train/test split.  NOTE(review): no random_state is set, so the
# split (and all printed metrics) vary from run to run.
train_X, test_X, train_Y, test_Y = train_test_split(data, target, train_size=0.8)

# Fit an ordinary least-squares model on all 10 features.
# (The original file had a stray `LinearRegression(..., normalize=False)`
# expression here — pasted repr output; `normalize` was removed in
# scikit-learn 1.2 and the line would raise TypeError, so it is dropped.)
model = LinearRegression()
model.fit(train_X, train_Y)

# Root-mean-squared error on the held-out test set, vectorized instead of
# a Python-level loop over predictions.  RMSE is in target units (not a
# percentage), so no "%" suffix in the output.
y_pred = model.predict(test_X)
rmse = np.sqrt(np.mean((y_pred - test_Y) ** 2))
print("RMSE by hand: {}".format(rmse))

# R^2 (coefficient of determination) reported by sklearn, shown as a percent.
score = model.score(test_X, test_Y)
print("sklearn score: {}%".format(score * 100))

'''
Examine the relationship between each individual feature and the target,
shown as a scatter plot with the per-feature fitted regression line.
'''

# 1. List the available feature columns.
print(df.columns)

# 2. Fit a one-feature model per column and plot it.

# Canvas sized for a 5-row x 2-column grid of subplots.
plt.figure(figsize=(2 * 6, 5 * 5))
for i, col in enumerate(df.columns):
    # Each DataFrame column is a 1-D Series, but LinearRegression expects a
    # 2-D (n_samples, n_features) array — hence the reshape(-1, 1).
    # Use a loop-local name so the earlier train/test split variables
    # (train_X / train_Y) are not silently clobbered.
    feature = np.asarray(df.loc[:, col]).reshape(-1, 1)
    single_model = LinearRegression()          # one-feature model
    single_model.fit(feature, target)          # fit on the full dataset
    score = single_model.score(feature, target)  # R^2 on the training data
    # Scatter plot of this feature against the target to eyeball the relation.
    axes = plt.subplot(5, 2, i + 1)
    plt.scatter(feature, target)
    # Overlay the fitted line y = k*x + b.
    k = single_model.coef_       # regression coefficient
    b = single_model.intercept_  # intercept
    x = np.linspace(feature.min(), feature.max(), 100)
    y = k * x + b
    plt.plot(x, y, c='red')
    axes.set_title(col + ':' + str(score))
plt.show()
