import numpy as np
import pandas as pd
from OOA import OOA
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

# Configure matplotlib so Chinese labels render correctly.
plt.rcParams.update({
    'font.sans-serif': ['Microsoft YaHei'],  # CJK-capable font
    'axes.unicode_minus': False,             # keep the minus sign renderable
    'font.size': 14,
})

# Load the dataset from the Excel workbook.
data = pd.read_excel('data.xlsx')

# Convention: every column except the last is a feature; the last is the target.
X = data.iloc[:, :-1].to_numpy()  # feature matrix
y = data.iloc[:, -1].to_numpy()   # target vector

# Hold out 20% of the samples for evaluation (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# 目标函数：根据LightGBM的性能返回一个适应度值
# Fitness function for the osprey optimizer: lower MSE is better.
def objective_function(params):
    """Train a LightGBM regressor with the candidate hyper-parameters
    and return its MSE on the held-out test split.

    Parameters
    ----------
    params : sequence of 5 floats
        (num_leaves, learning_rate, feature_fraction, bagging_fraction,
        max_depth) as produced by the optimizer; integer-valued entries
        are truncated with int().

    Returns
    -------
    float
        Mean squared error on the test split (the fitness value).
    """
    num_leaves, learning_rate, feature_fraction, bagging_fraction, max_depth = params
    # Use a distinct name for the LightGBM config so the incoming
    # parameter vector `params` is not shadowed (the original reused
    # the same name for both).
    lgb_params = {
        'num_leaves': int(num_leaves),          # leaves per tree
        'learning_rate': learning_rate,
        'feature_fraction': feature_fraction,   # column subsampling ratio
        'bagging_fraction': bagging_fraction,   # row subsampling ratio
        'max_depth': int(max_depth),
        'objective': 'regression',              # regression task
        'metric': 'mse',                        # evaluate with mean squared error
        'verbose': 0
    }
    train_data = lgb.Dataset(X_train, label=y_train)
    model = lgb.train(lgb_params, train_data, num_boost_round=300)
    # NOTE(review): fitness is computed on the test split, so the search is
    # tuned toward it — consider a separate validation split to avoid leakage.
    y_pred = model.predict(X_test)
    return mean_squared_error(y_test, y_pred)

# --- Osprey Optimization Algorithm (OOA) search configuration ---
pop_size = 10   # population size
dim = 5         # search dimensionality: (num_leaves, learning_rate,
                # feature_fraction, bagging_fraction, max_depth)
max_iter = 50   # number of optimizer iterations

# Per-dimension bounds, in the same order as `dim` above.
lb = np.asarray((10, 0.01, 0.1, 0.1, 1))    # lower bounds
ub = np.asarray((120, 0.8, 1.0, 1.0, 20))   # upper bounds

# Run the hyper-parameter search ('yes') or load a saved model ('no').
choice = 'no'
if choice.lower() == 'yes':
    # Run the osprey optimizer over the 5-D hyper-parameter space.
    GbestScore, GbestPosition, Curve = OOA(pop_size, dim, lb, ub, max_iter, objective_function)

    # Report the best parameter vector found and its fitness.
    # NOTE(review): assumes OOA returns GbestPosition with the best vector
    # at index 0 — confirm against the OOA implementation.
    best_num_leaves, best_learning_rate, best_feature_fraction, best_bagging_fraction, best_max_depth = GbestPosition[0]
    print("最优参数：")
    print("num_leaves:", int(best_num_leaves))
    print("learning_rate:", best_learning_rate)
    print("feature_fraction:", best_feature_fraction)
    print("bagging_fraction:", best_bagging_fraction)
    print("max_depth:", int(best_max_depth))
    print("最优性能（MSE）:", GbestScore)

    # Best fitness per iteration (convergence history); the original copied
    # Curve element-by-element with an index loop — a comprehension is the
    # idiomatic one-step equivalent.
    best_fitness_curve = [Curve[i] for i in range(max_iter)]

    # Plot the convergence curve of the search.
    plt.figure(figsize=(8, 6))
    plt.plot(range(1, max_iter + 1), best_fitness_curve, marker='o', linestyle='-', markersize=5, color='#1f77b4')
    plt.title("收敛速度曲线", fontsize=16)
    plt.xlabel("迭代次数", fontsize=12)
    plt.ylabel("最佳适应度值", fontsize=12)
    plt.grid(True)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    plt.tight_layout()
    plt.savefig('优化收敛图.png', dpi=600)
    plt.show()

    # Retrain LightGBM with the optimized hyper-parameters.
    best_params = {
        'num_leaves': int(best_num_leaves),
        'learning_rate': best_learning_rate,
        'feature_fraction': best_feature_fraction,
        'bagging_fraction': best_bagging_fraction,
        'max_depth': int(best_max_depth),
        'objective': 'regression',
        'metric': 'mse',
        'verbose': 0
    }
    train_data = lgb.Dataset(X_train, label=y_train)
    model = lgb.train(best_params, train_data, num_boost_round=300)

    # Evaluate on the held-out split.
    y_pred = model.predict(X_test)
    r2 = r2_score(y_test, y_pred)
    mse = mean_squared_error(y_test, y_pred)
    mae = mean_absolute_error(y_test, y_pred)

    print("R方 (R-squared):", r2)
    print("MSE (均方误差):", mse)
    print("MAE (平均绝对误差):", mae)

    # Persist the trained booster for later reuse by the 'no' branch.
    model.save_model('lightgbm_optimized_model.txt')
else:
    # Load the previously saved booster instead of re-running the search.
    model = lgb.Booster(model_file='lightgbm_optimized_model.txt')

    # Predict and score on the held-out split.
    y_pred = model.predict(X_test)
    r2 = r2_score(y_test, y_pred)
    mse = mean_squared_error(y_test, y_pred)
    mae = mean_absolute_error(y_test, y_pred)
    final_y_pred = y_pred
    print("R方 (R-squared):", r2)
    print("MSE (均方误差):", mse)
    print("MAE (平均绝对误差):", mae)

    # seaborn is only needed on this branch, so import it locally.
    # (The original imported it twice — once here and once before the
    # scatter plot; a single import suffices.)
    import seaborn as sns

    # --- Figure 1: true vs. predicted values over sample index ---
    plt.figure(figsize=(10, 6))
    sns.lineplot(x=range(len(y_test)), y=y_test, label='真实值', color='blue')
    sns.lineplot(x=range(len(final_y_pred)), y=final_y_pred, label='OOA-LightGBM', color='red', linestyle='dashed')
    plt.xlabel('样本序号', fontsize='medium')
    plt.ylabel('目标值', fontsize='medium')
    plt.title(f'R²: {r2:.4f}     MSE: {mse:.4f}     MAE: {mae:.4f}', fontsize=16)
    plt.legend()
    plt.savefig('预测对比图.png', dpi=600)
    plt.show()

    # --- Figure 2: predictions scattered against the y = x reference line ---
    plt.figure(figsize=(8, 6))
    sns.scatterplot(x=y_test, y=final_y_pred, color='#8c564b', marker='o', label='预测值', alpha=0.7)
    sns.lineplot(x=y_test, y=y_test, color='#d62728', linestyle='-', label='y=x', linewidth=2)
    plt.xlabel('真实值')
    plt.ylabel('预测值')
    plt.title('真实值 vs 预测值')
    plt.legend()
    plt.grid(True, linestyle='-', alpha=0.7)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    plt.tight_layout()
    plt.savefig('scatter_plot_msimap.png', dpi=600)
    plt.show()

    # --- Figure 3: histogram of prediction errors ---
    plt.figure(figsize=(8, 6))
    errors = y_test - final_y_pred
    plt.hist(errors, bins=20, color='skyblue', edgecolor='black', alpha=0.7)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.xlabel('误差', fontsize=12)
    plt.ylabel('频数', fontsize=12)
    plt.title('误差直方分布图', fontsize=14)
    # Mark the mean error with a dashed vertical line.
    mean_error = np.mean(errors)
    plt.axvline(mean_error, color='red', linestyle='dashed', linewidth=2, label=f'平均误差 = {mean_error:.2f}')
    plt.legend()
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    plt.tight_layout()
    plt.savefig('误差直方分布图.png', dpi=600)
    plt.show()