import numpy as np  
import pandas as pd  
import tensorflow as tf  
from sklearn.model_selection import train_test_split  
from sklearn.preprocessing import StandardScaler  
from bayes_opt import BayesianOptimization  
  
# Load the strip-steel product dataset (specs, process parameters, hardness)
# once and slice out features/target. The original read data.csv twice;
# a single read is sufficient and avoids re-parsing the file.
# NOTE(review): column positions 2, 6, 7, 9 (features) and 12 (hardness)
# are assumed to match the CSV layout — confirm against the data file.
_df = pd.read_csv('data.csv')
X = _df.iloc[:, [2, 6, 7, 9]]   # the four selected feature columns
y = _df.iloc[:, [12]]           # hardness target column

# Preprocessing: standardize the feature values (zero mean, unit variance).
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Split into training and test sets (80/20, fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.2, random_state=42
)
  
# 定义创建和编译模型的函数  
# Build and compile the regression MLP.
def create_model(num_neurons_1, num_neurons_2, num_neurons_3, dropout_rate,
                 learning_rate, input_dim=4):
    """Build and compile a 3-hidden-layer MLP for hardness regression.

    Args:
        num_neurons_1: width of the first hidden layer (floats from the
            Bayesian optimizer are truncated to int).
        num_neurons_2: width of the second hidden layer.
        num_neurons_3: width of the third hidden layer.
        dropout_rate: dropout probability applied after the first hidden layer.
        learning_rate: Adam learning rate.
        input_dim: number of input features. Defaults to 4 to match the
            four selected feature columns, so existing callers are unchanged.

    Returns:
        A compiled ``tf.keras`` model with a single linear output unit,
        using MAE as the training loss.
    """
    model = tf.keras.Sequential([
        # input_shape was hard-coded to (4,); parameterized for reuse.
        tf.keras.layers.Dense(int(num_neurons_1), activation='relu',
                              input_shape=(input_dim,)),
        tf.keras.layers.Dropout(dropout_rate),
        tf.keras.layers.Dense(int(num_neurons_2), activation='relu'),
        tf.keras.layers.Dense(int(num_neurons_3), activation='relu'),
        tf.keras.layers.Dense(1)  # linear output for regression
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss='mae')
    return model
  
# 定义贝叶斯优化的目标函数  
# Objective function for the Bayesian optimizer: LARGER must mean BETTER.
def bayesian_optimization_target(num_neurons_1, num_neurons_2, num_neurons_3,
                                 dropout_rate, learning_rate):
    """Train a fresh model and return the NEGATIVE test-set MAE.

    ``BayesianOptimization.maximize()`` maximizes this return value.
    The original returned the raw loss, which made the search converge to
    the WORST hyperparameters; negating the loss makes maximizing the
    target equivalent to minimizing the MAE.

    Returns:
        float: ``-MAE`` evaluated on the held-out test split.
    """
    model = create_model(num_neurons_1, num_neurons_2, num_neurons_3,
                         dropout_rate, learning_rate)
    model.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
    loss = model.evaluate(X_test, y_test, verbose=0)
    return -loss  # negate: minimizing MAE == maximizing -MAE
  
# Hyperparameter search space (continuous bounds; the neuron counts are
# truncated to ints inside create_model).
param_bounds = {
    'num_neurons_1': (32, 256),
    'num_neurons_2': (32, 256),
    'num_neurons_3': (32, 256),
    'dropout_rate': (0.0, 0.5),
    'learning_rate': (1e-4, 1e-2)
}

# Initialize the Bayesian optimizer over the target function.
# NOTE(review): BayesianOptimization MAXIMIZES the target, so the target
# must return a score where larger is better (e.g. -MAE) — verify the sign
# convention of bayesian_optimization_target before trusting the result.
bayes_opt = BayesianOptimization(
    f=bayesian_optimization_target,
    pbounds=param_bounds,
    random_state=42,
)

# 5 random warm-up evaluations, then 30 model-guided iterations.
bayes_opt.maximize(
    init_points=5,
    n_iter=30,
)

# Report the best hyperparameters found and the corresponding target value.
print("最优的超参数：", bayes_opt.max)

# Use Bayesian optimization to find the optimal hyperparameters.