import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api
from sklearn import linear_model, model_selection
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.preprocessing import StandardScaler, PolynomialFeatures


# Plot predicted values against the ground truth for visual comparison.
def draw_infer_result(groud_truths, infer_results):
    """Scatter model predictions against true targets.

    A y = x reference line is drawn; points lying close to it indicate
    accurate predictions. Displays the figure and returns None.
    """
    plt.title('Boston', fontsize=24)
    # Diagonal y = x reference line over the typical price range.
    ref = np.arange(-0.2, 50)
    plt.plot(ref, ref)
    plt.xlabel('ground truth', fontsize=14)
    plt.ylabel('infer result', fontsize=14)
    plt.scatter(groud_truths, infer_results, color='green', label='training cost')
    plt.grid()
    plt.show()


# Load the Boston housing data set (every column parsed as float).
df = pd.read_csv("boston_housing.csv", dtype=float)
# The last column is the target (house price); all preceding columns are features.
x, y = np.split(df.values, (df.shape[1] - 1,), axis=1)
# Hold out 10% of the rows as a test set; fixed seed keeps the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=1)
features = df.columns.values


# # 每个特征与房价的关系
# for i in range(len(features) - 1):
#     plt.scatter(x_train[:, i], y_train, s=10)  # 横纵坐标和点的大小
#     plt.title(features[i])
#     plt.show()
#     print(features[i], np.corrcoef(x_train[:, i], y_train[:, 0])[0, 1])
#
# # 线性回归 - 工具类
# lr = linear_model.LinearRegression()
# lr.fit(x_train, y_train)
# y_pred = lr.predict(x_test)
# lr_score = r2_score(y_test, y_pred)
# draw_infer_result(y_test, y_pred)
# print("score of lr", lr_score)
#
# # 标签的分布情况
# for i in range(len(y_train)):
#     plt.scatter(i, y_train[i], s=10)  # 横纵坐标和点的大小
# plt.show()
#
# # 根据散点图分析, 下列房屋的特征与房价的相关性最大
# # RM（每栋住宅的房间数）
# # LSTAT（地区中有多少房东属于低收入人群）
# # PTRATIO(城镇中的教师学生比例）
# i_ = []
# for i in range(len(y)):
#     if y[i] == 50:
#         i_.append(i)  # 存储房价等于50 的异常值下标
# x = np.delete(x, i_, axis=0)  # 删除房价异常值数据
# y = np.delete(y, i_, axis=0)  # 删除异常值
# j_ = []
# for i in range(13):
#     if features[i] == 'RM' or features[i] == 'PTRATIO' or features[i] == 'LSTAT':
#         continue
#     j_.append(i)  # 存储其他次要特征下标
# x = np.delete(x, j_, axis=1)  # 在总特征中删除次要特征
# print(np.shape(y))
# print(np.shape(x))
# x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, test_size=0.1, random_state=1)
#
# # 梯度提升回归
# gbr = GradientBoostingRegressor()
# gbr.fit(x_train, y_train)
# # 预测
# y_pred = gbr.predict(x_test)
# print(gbr.score(x_test, y_test))
# print(r2_score(y_test, y_pred))

# 多项式线性回归，根据RM,LSTAT,PTRATIO预测
# x_train = x_train[:, (5, 10, 12)]
# x_test = x_test[:, (5, 10, 12)]

# print(x_train[0:10])
# print(y_train[0:10])
# Standardize features to zero mean / unit variance: fit on the training
# split only, then apply the same transform to the test split.
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)


def draw(X, y):
    """Fit and plot a quadratic polynomial regression on a single feature.

    Only the last column of ``X`` is used as the predictor. A degree-2
    polynomial model is fitted with ordinary least squares, then the
    original points and the fitted curve are displayed.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Feature matrix; only the last column is used.
    y : ndarray of shape (n_samples,) or (n_samples, 1)
        Target values.
    """
    X = X[:, -1].reshape(-1, 1)
    # Degree-2 polynomial expansion. include_bias (the default) already
    # emits a constant column, and LinearRegression fits its own intercept,
    # so no separate constant term is needed.
    # BUGFIX: the previous version ran statsmodels.api.add_constant on the
    # training matrix but not on the prediction matrix; that only avoided a
    # feature-dimension mismatch because add_constant defaults to skipping
    # when a constant column already exists. Dropping it keeps the train
    # and predict pipelines identical.
    poly = PolynomialFeatures(degree=2)
    X_poly = poly.fit_transform(X)
    # Train a linear model on the expanded features (flatten y to 1-D as
    # sklearn expects).
    model = LinearRegression()
    model.fit(X_poly, y.ravel())
    # Evenly spaced grid over the observed range gives a smooth curve;
    # keep it 2-D so the same polynomial transform applies.
    x_min, x_max = X.min(), X.max()
    X_plot = np.linspace(x_min, x_max, 100).reshape(-1, 1)
    X_plot_poly = poly.transform(X_plot)
    # Original data points plus the fitted quadratic curve.
    plt.scatter(X, y, color='blue', label='Original data')
    plt.plot(X_plot, model.predict(X_plot_poly), color='red', label='Quadratic fit')
    plt.title('Quadratic Polynomial Regression')
    plt.xlabel('X')
    plt.ylabel('y')
    plt.legend()
    plt.show()


# draw(x_train, y_train)


# Re-load the full (unfiltered) Boston data: the last column is the target.
X, y = np.split(df.values, (df.shape[1] - 1,), axis=1)
# BUGFIX: np.split yields y as an (n, 1) column vector, which makes sklearn
# emit a DataConversionWarning in every cross_val_score call; flatten once.
y = y.ravel()
# Degree-2 polynomial expansion of all features; the bias column is omitted
# because every model below fits its own intercept.
poly = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly.fit_transform(X)
# Candidate models: plain OLS plus three regularized variants.
# NOTE(review): the polynomial features are not standardized here, so the
# penalties of Lasso/Ridge/ElasticNet act on very differently scaled
# columns — consider wrapping each model in a StandardScaler pipeline.
lr = LinearRegression()
lasso = Lasso(alpha=1.0)
ridge = Ridge(alpha=100.0)
elastic_net = ElasticNet()
# Shuffled 5-fold cross-validation; the fixed seed makes runs reproducible.
cv = KFold(n_splits=5, shuffle=True, random_state=1)
# R^2 score on each fold for every model.
scores_lr = cross_val_score(lr, X_poly, y, scoring='r2', cv=cv)
scores_lasso = cross_val_score(lasso, X_poly, y, scoring='r2', cv=cv)
scores_ridge = cross_val_score(ridge, X_poly, y, scoring='r2', cv=cv)
scores_elastic_net = cross_val_score(elastic_net, X_poly, y, scoring='r2', cv=cv)
# Average the per-fold scores into one R^2 figure per model.
r2_lr = np.mean(scores_lr)
r2_lasso = np.mean(scores_lasso)
r2_ridge = np.mean(scores_ridge)
r2_elastic_net = np.mean(scores_elastic_net)
# Report the cross-validated R^2 of each model.
print('Linear Regression R2:', r2_lr)
print('Lasso R2:', r2_lasso)
print('Ridge R2:', r2_ridge)
print('ElasticNet R2:', r2_elastic_net)
