"""
- CRIM per capita crime rate by town
- CRIM 各城镇人均犯罪率
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- 面积超过 25,000 平方英尺的住宅用地的 ZN 比例。
- INDUS proportion of non-retail business acres per town
- 每个城镇非零售商业面积的 INDUS 比例
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- CHAS Charles River 虚拟变量（如果区域边界为河流，则 = 1；否则为 0）
- NOX nitric oxides concentration (parts per 10 million)
- NOX一氧化氮浓度（千万分之一）
- RM average number of rooms per dwelling
- 每间住宅的平均房间数 RM
- AGE proportion of owner-occupied units built prior to 1940
- 1940 年之前建造的自住单位的 AGE 比例
- DIS weighted distances to five Boston employment centres
- 到五个波士顿就业中心的 DIS 加权距离
- RAD index of accessibility to radial highways
- RAD 放射状高速公路可达性指数
- TAX full-value property-tax rate per $10,000
- 每 10,000 美元的 TAX 全值财产税税率
- PTRATIO pupil-teacher ratio by town
- PTRATIO 各城镇的师生比例
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- B 1000(Bk - 0.63)^2 其中 Bk 是按城镇划分的黑人比例
- LSTAT % lower status of the population
- LSTAT % 人口地位较低
- MEDV Median value of owner-occupied homes in $1000's
- MEDV 自住房屋的中值价值（1000 美元）
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn import ensemble
from sklearn.linear_model import Lasso
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import r2_score as r2, mean_squared_error as mse, mean_absolute_error as mae

plt.style.use('ggplot')

# The Boston dataset used to ship with sklearn:
# from sklearn.datasets import load_boston
# print(load_boston().DESCR)

# Load the housing data from disk (columns documented in the module docstring).
df = pd.read_csv("housing.csv")
# Quick sanity checks, kept for reference:
# print(df.info())
# print(df.head())

# Correlation exploration, kept for reference:
# plt.figure(figsize=(12, 8))
# sns.heatmap(df.corr(), annot=True, fmt='.2f', cmap="PuBu")
# print(df.corr()['MEDV'].sort_values())
# sns.pairplot(df[["LSTAT", "RM", "PTRATIO", "MEDV"]])

# Features are every column except the target MEDV (the last column).
x, y = df.iloc[:, :-1], df['MEDV']
x_train, x_test, y_train, y_test = train_test_split(
    x, y, random_state=1, test_size=0.2)
# print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)

# Standardize features and target to zero mean / unit variance.
# Both scalers are fitted on the training split only, so no information
# leaks from the test split into the transform.
ss_x = StandardScaler()
x_train = ss_x.fit_transform(x_train)
x_test = ss_x.transform(x_test)

# StandardScaler needs 2-D input, so the target Series is reshaped
# into an (n, 1) column vector first.
ss_y = StandardScaler()
y_train = ss_y.fit_transform(y_train.to_numpy().reshape(-1, 1))
y_test = ss_y.transform(y_test.to_numpy().reshape(-1, 1))

# Ordinary least squares on the standardized train/test split.
linear_model = LinearRegression()
linear_model.fit(x_train, y_train)
coef = linear_model.coef_
line_pre = linear_model.predict(x_test)

print("测试数据集")
print('SCORE:{:.4f}'.format(linear_model.score(x_test, y_test)))
print('RMSE:{:.4f}'.format(np.sqrt(mean_squared_error(y_test, line_pre))))
# print(coef)

# Predicted-vs-actual scatter; the dashed diagonal marks perfect predictions.
plt.scatter(y_test, line_pre)
diag = [y_test.min(), y_test.max()]
plt.plot(diag, diag, 'k--', lw=4, label='predicted')
# plt.show()

# Refit OLS on the full, unscaled dataset for comparison with the
# held-out-test evaluation above.
linear_model_all = LinearRegression()
linear_model_all.fit(x, y)
coef = linear_model_all.coef_
line_pre_all = linear_model_all.predict(x)

print("全数据集")
print('SCORE:{:.4f}'.format(linear_model_all.score(x, y)))
print('RMSE:{:.4f}'.format(np.sqrt(mean_squared_error(y, line_pre_all))))
# print(coef)

# Predicted-vs-actual scatter over the whole dataset.
plt.scatter(y, line_pre_all)
diag_all = [y.min(), y.max()]
plt.plot(diag_all, diag_all, 'k--', lw=4, label='predicted')
# plt.show()

# 探索模型优化，尝试使用部分特征进行训练
# print(df.corr()['MEDV'].abs().sort_values(ascending=False).head(4))
# X2 = np.array(df[['LSTAT', 'RM', 'PTRATIO']])
# X2_train, X2_test, y_train, y_test = train_test_split(X2, y, random_state=1, test_size=0.2)
# linear_model2 = LinearRegression()
# linear_model2.fit(X2_train, y_train)
# print(linear_model2.intercept_)
# print(linear_model2.coef_)
# line2_pre = linear_model2.predict(X2_test)  # 预测值
# print('SCORE:{:.4f}'.format(linear_model2.score(X2_test, y_test)))  # 模型评分
# print('RMSE:{:.4f}'.format(np.sqrt(mean_squared_error(y_test, line2_pre))))  # RMSE(标准误差)

# Gradient-boosted regression trees on the standardized split.
# NOTE(review): the old commented params were stale — min_samples_split
# must be >= 2, and loss 'ls' was renamed 'squared_error' in modern
# scikit-learn — so they are left here only as a record:
# params = {'n_estimators': 500, 'max_depth': 4, 'learning_rate': 0.01}
# clf = ensemble.GradientBoostingRegressor(**params)
clf = ensemble.GradientBoostingRegressor()
# y_train is an (n, 1) column vector after scaling; ravel() to the 1-D
# shape sklearn expects (avoids a DataConversionWarning).
clf.fit(x_train, y_train.ravel())
clf_pre = clf.predict(x_test)  # predictions on the test split
print("梯度提升回归")
print('SCORE:{:.4f}'.format(clf.score(x_test, y_test)))  # R^2 on test set
print('RMSE:{:.4f}'.format(np.sqrt(mean_squared_error(y_test, clf_pre))))

# L1-regularised linear regression (sklearn default alpha=1.0).
lasso = Lasso()
lasso.fit(x_train, y_train)
y_predict_lasso = lasso.predict(x_test)
lasso_score = lasso.score(x_test, y_test)
lasso_rmse = np.sqrt(mean_squared_error(y_test, y_predict_lasso))
print("Lasso回归")
print('SCORE:{:.4f}'.format(lasso_score))  # R^2 on test set
print('RMSE:{:.4f}'.format(lasso_rmse))    # root mean squared error

# Elastic-net: combined L1/L2 regularisation (sklearn defaults).
enet = ElasticNet()
enet.fit(x_train, y_train)
y_predict_enet = enet.predict(x_test)
enet_score = enet.score(x_test, y_test)
enet_rmse = np.sqrt(mean_squared_error(y_test, y_predict_enet))
print("ElasticNet回归")
print('SCORE:{:.4f}'.format(enet_score))  # R^2 on test set
print('RMSE:{:.4f}'.format(enet_rmse))    # root mean squared error


# Support Vector Regression (SVR)
def svr_model(kernel):
    """Fit an SVR with the given kernel on the standardized split and
    print its R^2 score and RMSE on the test set.

    Uses the module-level x_train / y_train / x_test / y_test arrays.

    Parameters
    ----------
    kernel : str
        Kernel name forwarded to sklearn.svm.SVR
        ('linear', 'poly', 'rbf', ...).
    """
    svr = SVR(kernel=kernel)
    # y_train is an (n, 1) column vector after scaling; SVR requires 1-D
    # targets, so ravel() to avoid a DataConversionWarning.
    svr.fit(x_train, y_train.ravel())
    y_predict = svr.predict(x_test)
    # Bug fix: the label previously said "线性回归" (linear regression),
    # but this block evaluates support-vector regression.
    print("支持向量回归 ", kernel)
    print('SCORE:{:.4f}'.format(svr.score(x_test, y_test)))  # R^2 on test set
    print('RMSE:{:.4f}'.format(np.sqrt(mean_squared_error(y_test, y_predict))))
    # Alternative metrics, kept for reference:
    # print(kernel, ' SVR R-squared:', r2(y_test, y_predict))
    # print(kernel, ' SVR mean squared error:', mse(y_test, y_predict))
    # print(kernel, ' SVR mean absolute error:', mae(y_test, y_predict))


# Evaluate the three standard SVR kernels.
for svr_kernel in ('linear', 'poly', 'rbf'):
    svr_model(kernel=svr_kernel)

# Shallow decision-tree regressor (max_depth=2 keeps the tree small).
tree_reg = DecisionTreeRegressor(max_depth=2)
tree_reg.fit(x_train, y_train)
tree_reg_pre = tree_reg.predict(x_test)
tree_score = tree_reg.score(x_test, y_test)
tree_rmse = np.sqrt(mean_squared_error(y_test, tree_reg_pre))
print("决策树回归")
print('SCORE:{:.4f}'.format(tree_score))  # R^2 on test set
print('RMSE:{:.4f}'.format(tree_rmse))    # root mean squared error
