import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt

# Plot styling: a Chinese-capable font, correct minus-sign rendering,
# and a readable base font size.
plt.rcParams.update({
    'font.sans-serif': ['Microsoft YaHei'],
    'axes.unicode_minus': False,
    'font.size': 14,
})

# Load the modelling table from the Excel workbook.
data = pd.read_excel('data.xlsx')

# Convention: every column except the last is a feature; the last is the target.
X, y = data.iloc[:, :-1], data.iloc[:, -1]

# Hold out 20% of the rows for testing; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)


# 目标函数：根据LightGBM的性能返回一个适应度值（MSE）
def objective_function(params):
    """Fitness function for the hyperparameter search.

    Trains a LightGBM regressor with the candidate hyperparameters and
    returns the held-out-set MSE (lower is better).

    Args:
        params: sequence of 5 numbers, in order:
            (num_leaves, learning_rate, feature_fraction,
             bagging_fraction, max_depth). Integer-valued parameters are
            truncated with int().

    Returns:
        float: mean squared error of the trained model on X_test/y_test.
    """
    num_leaves, learning_rate, feature_fraction, bagging_fraction, max_depth = params
    params_lgb = {
        'num_leaves': int(num_leaves),
        'learning_rate': learning_rate,
        'feature_fraction': feature_fraction,
        'bagging_fraction': bagging_fraction,
        # FIX: bagging_fraction is silently ignored unless bagging_freq > 0
        # (LightGBM disables bagging when bagging_freq == 0, its default),
        # so the searched bagging_fraction values had no effect.
        'bagging_freq': 1,
        'max_depth': int(max_depth),
        'objective': 'regression',
        'metric': 'mse',
        'verbose': -1
    }
    train_data = lgb.Dataset(X_train, label=y_train)
    gbm = lgb.train(params_lgb, train_data, num_boost_round=300)
    # NOTE(review): scoring candidates on the final test set leaks it into
    # model selection; a separate validation split would be cleaner --
    # confirm whether that matters for this analysis.
    y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
    mse = mean_squared_error(y_test, y_pred)
    return mse


# 随机搜索函数
def random_search(params_bounds, objective_function, num_iterations):
    best_params = None
    best_score = float('inf')
    for _ in range(num_iterations):
        current_params = [np.random.uniform(low, high) for low, high in params_bounds]
        score = objective_function(current_params)
        if score < best_score:
            best_score = score
            best_params = current_params
    return best_score, best_params


# Search space: one (low, high) pair per hyperparameter, in the order
# objective_function unpacks them: num_leaves, learning_rate,
# feature_fraction, bagging_fraction, max_depth.
params_bounds = [(20, 100), (0.01, 0.3), (0.4, 1.0), (0.4, 1.0), (3, 15)]

# Search budget: number of random candidates to evaluate.
num_random_iterations = 50

# Run the search; it returns the lowest MSE seen and the winning candidate.
random_best_score, random_best_params = random_search(params_bounds, objective_function, num_random_iterations)

# Report the winning configuration and its score.
print("随机搜索最优参数：")
print(
    f"num_leaves: {int(random_best_params[0])}, learning_rate: {random_best_params[1]}, feature_fraction: {random_best_params[2]},")
print(f"bagging_fraction: {random_best_params[3]}, max_depth: {int(random_best_params[4])}")
print(f"随机搜索最优性能（MSE）: {random_best_score:.4f}")

# Rebuild the LightGBM parameter dict from the winning candidate,
# truncating the integer-valued hyperparameters.
_param_names = ('num_leaves', 'learning_rate', 'feature_fraction', 'bagging_fraction', 'max_depth')
best_params_lgb = dict(zip(_param_names, random_best_params))
best_params_lgb['num_leaves'] = int(best_params_lgb['num_leaves'])
best_params_lgb['max_depth'] = int(best_params_lgb['max_depth'])
best_params_lgb.update({'objective': 'regression', 'metric': 'mse', 'verbose': -1})

# Retrain on the training split with the tuned parameters.
train_data = lgb.Dataset(X_train, label=y_train)
gbm = lgb.train(best_params_lgb, train_data, num_boost_round=300)

# Score the tuned model on the held-out test set.
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print("使用最优参数的模型评估：")
print(f"MSE: {mse:.4f}, MAE: {mae:.4f}, R^2: {r2:.4f}")

# Load the previously saved OOA-optimised LightGBM model for comparison.
model = lgb.Booster(model_file='lightgbm_optimized_model.txt')
final_y_pred = model.predict(X_test)

# Overlay the actual values and both models' predictions, per test sample.
plt.figure(figsize=(12, 6))
comparison_series = (
    (y_test, 'Actual Values', 'black', '--'),
    (y_pred, 'Random Optimized LightGBM Predictions', 'blue', '-'),
    (final_y_pred, 'OOA Algorithm LightGBM Predictions', 'red', '-'),
)
for values, label, color, style in comparison_series:
    plt.plot(range(len(values)), values, label=label, color=color, linestyle=style)
plt.legend()
plt.title('Comparison of Predictions from Different Models')
plt.xlabel('Sample Index')
plt.ylabel('Target Variable')
plt.tight_layout()
plt.show()

# Per-sample signed prediction errors for both models (reused by the
# histogram below).
error_lgbm = y_pred - y_test
error_ooa = final_y_pred - y_test

plt.figure(figsize=(12, 6))
sample_idx = range(len(y_test))
plt.plot(sample_idx, error_lgbm, label='Random Optimized Error', color='blue', alpha=0.7)
plt.plot(sample_idx, error_ooa, label='OOA Algorithm Error', color='red', alpha=0.7)
plt.axhline(y=0, color='gray', linestyle='--')  # zero-error reference line
plt.xlabel('Sample Index')
plt.ylabel('Prediction Error')
plt.title('Comparison of Prediction Errors')
plt.legend()
plt.grid(True)
plt.show()

# Error-distribution histograms for both models.
plt.figure(figsize=(12, 6))
plt.hist(error_lgbm, bins=30, alpha=0.5, label='Random Optimized Error', color='blue', edgecolor='black')
# FIX: the OOA errors were previously plotted with random Gaussian jitter
# added (np.random.normal(0, 0.05, len(error_ooa))), which distorts and
# misrepresents the model's actual error distribution -- and makes the
# figure non-reproducible. Plot the raw errors instead.
plt.hist(error_ooa, bins=30, alpha=0.5, label='OOA Algorithm Error',
         color='red', edgecolor='black')
plt.xlabel('Prediction Error')
plt.ylabel('Frequency')
plt.title('Distribution of Prediction Errors')
plt.legend()
plt.grid(True)
plt.show()

# Side-by-side bars of MSE / MAE / R^2 for the two models.
# NOTE: the three metrics live on different scales but share one y-axis.
metrics = [
    ('MSE', mean_squared_error(y_test, y_pred), mean_squared_error(y_test, final_y_pred)),
    ('MAE', mean_absolute_error(y_test, y_pred), mean_absolute_error(y_test, final_y_pred)),
    ('R^2', r2_score(y_test, y_pred), r2_score(y_test, final_y_pred))
]
index = np.arange(len(metrics))
bar_width = 0.35
half_width = bar_width / 2
plt.figure(figsize=(10, 6))
for pos, (metric_name, score_lgbm, score_other) in zip(index, metrics):
    plt.bar(pos - half_width, score_lgbm, bar_width, label=f'Random Optimized ({metric_name})', color='blue',
            edgecolor='gray')
    plt.bar(pos + half_width, score_other, bar_width, label=f'OOA Algorithm ({metric_name})', color='red',
            edgecolor='gray')
plt.xlabel('Performance Metrics')
plt.ylabel('Score')
plt.title('Comparison of Model Performance')
plt.xticks(index, [name for name, _, _ in metrics], rotation=45)
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()