import pandas as pd
import numpy as np
from keras.src.layers import Dropout
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from skopt import gp_minimize, space
from skopt.utils import use_named_args
from skopt.plots import plot_convergence
from keras.models import Sequential
from keras.layers import LSTM, Dense
from keras.callbacks import History
from matplotlib import pyplot as plt
# Configure matplotlib to use a font that can render Chinese characters.
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # default font: Microsoft YaHei
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with a CJK font active
# Step 1: data preprocessing
# Load the weekly second-hand housing price data (already min-max normalised).
df = pd.read_excel('min-max标准化_二手房均价广东广州市当周值等.xlsx')
# Drop the indicator-name column; it carries no numeric information.
df = df.drop(['指标名称'], axis=1)

# Split into features X and target y (price of the 天河区 district).
# The data is already min-max normalised, so no further scaling is applied.
# Fix: the original code computed X and y twice with identical statements;
# the duplicate assignment has been removed.
X = df.drop('天河区', axis=1)
y = df['天河区']

# Hold out 20% of the rows as a test set (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)


# Step 2: Bayesian optimisation objective for the XGBoost model.
def objective(params):
    """Fit an XGBRegressor with *params* and return the negated test-set R^2.

    gp_minimize minimises its objective, so the score is negated: a higher
    R^2 corresponds to a lower loss.
    """
    regressor = XGBRegressor(**params)
    regressor.fit(X_train, y_train)
    predictions = regressor.predict(X_test)
    return -r2_score(y_test, predictions)


# Hyper-parameter search space for the XGBoost model.
space_params = [
    space.Real(1e-3, 1e0, name='learning_rate', prior='log-uniform'),  # learning rate (log scale)
    space.Integer(100, 1000, name='n_estimators'),  # number of boosted trees
    space.Integer(3, 10, name='max_depth'),  # maximum tree depth
    space.Real(0.1, 1.0, name='subsample'),  # fraction of rows sampled per tree
    space.Real(0.1, 1.0, name='colsample_bytree'),  # fraction of columns sampled per tree
    space.Real(0.1, 1, name='gamma'),  # minimum loss reduction required to split a leaf
    space.Real(0.1, 10, name='reg_alpha', prior='log-uniform'),  # L1 regularisation weight
    space.Real(0.1, 10, name='reg_lambda', prior='log-uniform'),  # L2 regularisation weight
]


# Named-argument wrapper so gp_minimize can call objective() with a
# parameter dict assembled from each search-space sample.
@use_named_args(space_params)
def optimize_xgb(learning_rate, n_estimators, max_depth, subsample, colsample_bytree, gamma, reg_alpha, reg_lambda):
    """Forward one sampled hyper-parameter set to objective() and return its loss."""
    return objective(dict(
        learning_rate=learning_rate,
        n_estimators=n_estimators,
        max_depth=max_depth,
        subsample=subsample,
        colsample_bytree=colsample_bytree,
        gamma=gamma,
        reg_alpha=reg_alpha,
        reg_lambda=reg_lambda,
    ))


# Run the Bayesian optimisation (Gaussian-process surrogate, 50 evaluations).
res_gp = gp_minimize(optimize_xgb, space_params, n_calls=50, random_state=0)

# Report the best hyper-parameter vector found.
print('Best parameters found by Bayesian Optimization:', res_gp.x)

# Refit XGBoost on the training data with the optimal hyper-parameters.
param_names = ['learning_rate', 'n_estimators', 'max_depth', 'subsample',
               'colsample_bytree', 'gamma', 'reg_alpha', 'reg_lambda']
best_params = {name: value for name, value in zip(param_names, res_gp.x)}
xgb_reg = XGBRegressor(**best_params)
xgb_reg.fit(X_train, y_train)

# Feature importances of the tuned model, ordered most → least important.
importances = xgb_reg.feature_importances_
feature_names = X_train.columns
indices = np.argsort(importances)[::-1]
# Pie chart of the largest feature importances.
def plot_feature_importances(importances, feature_names, num_features=10):
    """Show a pie chart of the *num_features* largest feature importances."""
    top = np.argsort(importances)[::-1][:num_features]
    labels = [feature_names[i] for i in top]
    plt.figure(figsize=(10, 8))
    plt.pie(importances[top], labels=labels, autopct='%1.1f%%', startangle=90)
    plt.axis('equal')  # keep the pie circular
    plt.title('特征重要性')
    plt.show()


# Draw the feature-importance pie chart for the tuned model.
plot_feature_importances(importances, feature_names)


# Print every feature with its importance, ranked from most to least
# important. Idiom fix: iterate the precomputed ranking directly with
# enumerate instead of indexing via range(len(...)).
print("Feature ranking:")
for rank, idx in enumerate(indices, start=1):
    print("%d. feature %s (%f)" % (rank, feature_names[idx], importances[idx]))

# Keep only the n most important features.
n_top_features = 5
indices = np.argsort(importances)[::-1][:n_top_features]
top_features = [feature_names[i] for i in indices]
X_train_selected = X_train[top_features]
X_test_selected = X_test[top_features]
# NOTE(review): X_train_selected / X_test_selected are never used later in
# this file -- the LSTM below trains on the full feature set. Confirm whether
# the selected subsets were meant to feed the LSTM instead.

# Step 3: build the LSTM model
# Reshape the 2-D feature matrices into the (samples, time steps, features)
# layout required by Keras LSTM layers; each sample is a single time step.
# NOTE(review): the full feature matrices are used here, not the
# X_train_selected / X_test_selected subsets computed earlier -- confirm
# which was intended.
X_train_reshaped = np.reshape(X_train.values, (X_train.shape[0], 1, X_train.shape[1]))
X_test_reshaped = np.reshape(X_test.values, (X_test.shape[0], 1, X_test.shape[1]))

# Build and train the LSTM: one 100-unit LSTM layer, dropout for
# regularisation, and a single linear output unit for the price.
model = Sequential()
model.add(LSTM(100, activation='relu', input_shape=(1, X_train.shape[1]), return_sequences=False))
model.add(Dropout(0.2))  # dropout to reduce overfitting
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')

# Train and keep the loss history. Fix: model.fit() already returns a
# History object with per-epoch losses, so the separate History() callback
# the original code constructed and passed in was redundant.
history = model.fit(X_train_reshaped, y_train, epochs=100, batch_size=32, verbose=1)


# Plot the LSTM training loss against epoch number.
def plot_loss_curve(history):
    """Plot the per-epoch training loss recorded in *history*."""
    losses = history.history['loss']
    plt.figure(figsize=(10, 6))
    plt.plot(losses, label='train')
    plt.title('LSTM训练过程中的损失')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()


plot_loss_curve(history)

# Step 4: evaluate the model and visualise the predictions
# Predict on the test set; flatten to 1-D so the sklearn metrics accept it.
y_pred = model.predict(X_test_reshaped).flatten()
r2 = r2_score(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_test, y_pred)
print('R^2:', r2)
print('RMSE:', rmse)
print('MAE:', mae)

# Plot predicted vs. actual prices over the test period.
plt.plot(y_test.values, label='真实值', color='#555555')
plt.plot(y_pred, label='预测值', color='#003F87')
plt.xlabel('时间')
plt.ylabel('房价')
plt.legend()
plt.show()

# Visualise how the Bayesian optimisation converged.
plot_convergence(res_gp)

# plot_convergence draws onto the current figure; grab it to relabel it.
fig = plt.gcf()

# Give the figure a Chinese title.
fig.suptitle('贝叶斯优化收敛过程', fontsize=14, fontweight='bold')

# Relabel the first axis when one exists (plot_convergence normally
# creates exactly one axis).
for ax in fig.axes[:1]:
    ax.set_xlabel('迭代次数')
    ax.set_ylabel('目标函数值')

plt.show()

# Collect true and predicted values side by side.
results = {
    '真实值': y_test,
    '预测值': y_pred
}
predictions_df = pd.DataFrame(results)

# Export the comparison table to an Excel workbook.
predictions_df.to_excel('boxgblstm预测值与真实值.xlsx', index=False)