import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

from sklearn.model_selection import train_test_split  # 这里是引用了交叉验证
from sklearn.linear_model import LinearRegression  # 线性回归


# --- Load data and build the train/test split --------------------------------
# Expects train.csv with feature columns x1..x4 and a target column Y.
pd_data = pd.read_csv('train.csv')
# Use a list (not a tuple) to select multiple columns: .loc reserves tuple
# indexers for MultiIndex keys, so a list is the documented, future-proof form.
X = pd_data.loc[:, ['x1', 'x2', 'x3', 'x4']]
y = pd_data.loc[:, 'Y']
# Hold out 20% of the rows as a test set; fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
print('训练集测试及参数:')
print('X_train.shape={}\n y_train.shape ={}\n X_test.shape={}\n y_test.shape={}'.format(X_train.shape,
                                                                                        y_train.shape,
                                                                                        X_test.shape,
                                                                                        y_test.shape))

# --- Fit an ordinary least-squares linear regression --------------------------
linreg = LinearRegression()
model = linreg.fit(X_train, y_train)
print('模型参数:')
print(model)
# Intercept (bias term) learned during fitting.
print('模型截距:')
bias = linreg.intercept_
print(bias)
# One coefficient per feature, in the same order as X's columns.
print('参数权重:')
weight = linreg.coef_
print(weight)

# --- Evaluate on the held-out test set ----------------------------------------
y_pred = linreg.predict(X_test)
# Vectorized RMSE: sqrt(mean((pred - truth)^2)). Replaces the previous
# Python-level index loop, which was slower and relied on fragile positional
# pairing of y_pred with y_test.values.
sum_erro = np.sqrt(np.mean((y_pred - y_test.to_numpy()) ** 2))
# calculate RMSE
print("RMSE by hand:", sum_erro)

# Human-readable form of the fitted equation.
print('线性回归方程：y = {:.2f} + {:.2f} * x1 + {:.2f} * x2 + {:.2f} * x3 + {:.2f} * x4'.format(bias, weight[0], weight[1],
                                                                                         weight[2], weight[3]))