import numpy as np
import torch
import matplotlib.pyplot as plt
import warnings
from scipy.optimize import minimize, differential_evolution
from scipy.stats import norm, qmc  # qmc imported explicitly (quasi-Monte-Carlo sampling)
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, ConstantKernel, WhiteKernel
from sklearn.preprocessing import StandardScaler
import tkinter as tk
from tkinter import ttk, messagebox, scrolledtext
import matplotlib.backends.backend_tkagg as tkagg
from matplotlib.figure import Figure
import threading

warnings.filterwarnings("ignore")
# Fonts that can render the Chinese UI labels; disable the unicode minus so
# negative axis ticks still render with these fonts.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

# Objective-function entry point (the user supplies/replaces target_module2.py).
from target_module2 import target_function

# Try to import Ax's ParameterType; fall back to a string-based stand-in when
# ax-platform is not installed.
try:
    from ax import ParameterType
except Exception:
    class ParameterType:
        # Only these two labels are used by this tool.
        FLOAT = "FLOAT"
        INT = "INT"

# BoTorch / gpytorch imports (with a compatibility fallback: batch q-EI/q-NEI
# modes are disabled in the GUI when these imports fail).
try:
    from botorch.models import SingleTaskGP
    from botorch.fit import fit_gpytorch_mll
    from botorch.acquisition.monte_carlo import qExpectedImprovement, qNoisyExpectedImprovement
    from botorch.sampling import SobolQMCNormalSampler
    from botorch.optim import optimize_acqf
    from botorch.utils.transforms import standardize
    from gpytorch.mlls import ExactMarginalLogLikelihood

    # NOTE(review): the name is misspelled ("BOTOCH" vs "BOTORCH") but it is
    # used consistently throughout this file, so it is left unchanged here.
    BOTOCH_OK = True
except Exception as e:
    BOTOCH_OK = False
    print("警告：BoTorch / gpytorch 导入失败，批量 q-EI/q-NEI 将不可用。错误：", e)

# Fixed random seeds for reproducible runs.
np.random.seed(0)
torch.manual_seed(0)

# Device / dtype selection.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Double precision on every device for numerical stability.  The original
# conditional selected torch.double in both branches, so the dead branch is
# removed.
TORCH_DTYPE = torch.double

# Global optimization history shared across a run.
optimization_history = {
    'iteration': [],
    'best_value': [],
    'acquisition_value': [],
    'X_history': [],  # raw parameter values (after multiplying by precision)
    'X_scaled_history': [],  # scaled parameter values (after dividing by precision)
    'y_history': [],
    'param_names': [],
    'param_types': [],
    'param_precisions': [],  # per-parameter precision (step size)
    'opt_type': 'single',
    'batch_size': 1,
    'suggested_points_history': [],  # every suggested point (raw values)
    'suggested_scaled_points_history': [],  # every suggested point (scaled values)
    'suggestion_counts': {}  # how many times each point has been suggested
}

# Default values for the optimization-algorithm parameters.
DEFAULT_MC_SAMPLES = 512
DEFAULT_GP_RESTARTS = 10
DEFAULT_EI_CANDIDATES = 2000
DEFAULT_EI_TOP_K = 10
DEFAULT_EI_LOCAL_RESTARTS = 6
DEFAULT_JOINT_NUM_RESTARTS = 20
DEFAULT_JOINT_RAW_SAMPLES = 1024
DEFAULT_BATCH_SIZE = 2

# Stopping-condition defaults.
DEFAULT_MAX_ITERATIONS = 50
DEFAULT_NO_IMPROVEMENT_ITERATIONS = 10  # raised from 5 to 10
DEFAULT_TARGET_THRESHOLD = None
DEFAULT_RELATIVE_TOLERANCE = 1e-6

class BayesianOptimizationGUI:
    def __init__(self, root):
        """Initialize all GUI state and build the widget tree.

        Args:
            root: The Tk root window hosting the application.
        """
        self.root = root
        self.root.title("贝叶斯优化工具（支持精度设置）")
        self.root.geometry("1400x900")  # larger default window size
        self.root.minsize(1200, 800)  # larger minimum window size

        # Parameter configuration: parallel lists, one entry per parameter.
        self.param_names = []
        self.param_bounds = []  # raw (min, max) ranges
        self.param_types = []  # FLOAT or INT
        self.param_precisions = []  # per-parameter precision (step size)
        self.opt_type = "single"  # single, batch_ei, batch_nei
        self.batch_size = DEFAULT_BATCH_SIZE
        self.mode = "1"  # 1 = automatic objective function, 2 = manual result entry
        self.mode_var = tk.StringVar(value="1")  # backing variable for the mode radio buttons
        self.optimization_running = False
        self.stop_requested = False

        # Optimization-algorithm parameters (seeded from module-level defaults).
        self.mc_samples = DEFAULT_MC_SAMPLES
        self.gp_restarts = DEFAULT_GP_RESTARTS
        self.ei_candidates = DEFAULT_EI_CANDIDATES
        self.ei_top_k = DEFAULT_EI_TOP_K
        self.ei_local_restarts = DEFAULT_EI_LOCAL_RESTARTS
        self.joint_num_restarts = DEFAULT_JOINT_NUM_RESTARTS
        self.joint_raw_samples = DEFAULT_JOINT_RAW_SAMPLES

        # Stopping-condition parameters.
        self.max_iterations = DEFAULT_MAX_ITERATIONS
        self.no_improvement_iterations = DEFAULT_NO_IMPROVEMENT_ITERATIONS
        self.target_threshold = DEFAULT_TARGET_THRESHOLD
        self.relative_tolerance = DEFAULT_RELATIVE_TOLERANCE
        self.use_target_threshold = tk.BooleanVar(value=False)
        self.use_relative_tolerance = tk.BooleanVar(value=False)
        self.repeat_stop_threshold = tk.IntVar(value=4)  # repeated-suggestion stop threshold (was 5, now 4)

        # Whether the matplotlib figure has been created yet.
        self.fig_initialized = False

        # Build the main interface.
        self.create_widgets()

    def create_widgets(self):
        """Create the five-tab notebook and populate every tab."""
        notebook = ttk.Notebook(self.root)
        notebook.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

        # (attribute name, tab title, builder method), in display order.
        tab_specs = (
            ("param_frame", "参数配置", self.setup_param_frame),
            ("optimization_frame", "优化设置", self.setup_optimization_frame),
            ("stop_conditions_frame", "停止条件", self.setup_stop_conditions_frame),
            ("process_frame", "优化过程", self.setup_process_frame),
            ("results_frame", "结果分析", self.setup_results_frame),
        )

        # First create and register every tab frame...
        for attr_name, title, _builder in tab_specs:
            frame = ttk.Frame(notebook)
            notebook.add(frame, text=title)
            setattr(self, attr_name, frame)

        # ...then run each tab's builder (builders may reference other frames).
        for _attr_name, _title, builder in tab_specs:
            builder()

    def setup_param_frame(self):
        """Build the "parameter configuration" tab: table, entry row, buttons."""
        # Frame that holds the parameter table.
        param_list_frame = ttk.LabelFrame(self.param_frame, text="参数列表")
        param_list_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

        # Table columns - includes the "precision" column.
        columns = ("index", "name", "min", "max", "precision", "type")
        self.param_tree = ttk.Treeview(param_list_frame, columns=columns, show="headings", height=10)

        # Column headings.
        self.param_tree.heading("index", text="序号")
        self.param_tree.heading("name", text="参数名称")
        self.param_tree.heading("min", text="最小值")
        self.param_tree.heading("max", text="最大值")
        self.param_tree.heading("precision", text="精度")
        self.param_tree.heading("type", text="类型")

        # Column widths.
        self.param_tree.column("index", width=50, anchor=tk.CENTER)
        self.param_tree.column("name", width=150, anchor=tk.CENTER)
        self.param_tree.column("min", width=100, anchor=tk.CENTER)
        self.param_tree.column("max", width=100, anchor=tk.CENTER)
        self.param_tree.column("precision", width=100, anchor=tk.CENTER)  # precision column
        self.param_tree.column("type", width=80, anchor=tk.CENTER)

        self.param_tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=5, pady=5)

        # Vertical scrollbar for the table.
        scrollbar = ttk.Scrollbar(param_list_frame, orient=tk.VERTICAL, command=self.param_tree.yview)
        self.param_tree.configure(yscroll=scrollbar.set)
        scrollbar.pack(side=tk.RIGHT, fill=tk.Y, pady=5)

        # Entry row for adding/editing a parameter - includes a precision field.
        param_ops_frame = ttk.Frame(self.param_frame)
        param_ops_frame.pack(fill=tk.X, padx=10, pady=5)

        # Parameter entry widgets.
        ttk.Label(param_ops_frame, text="参数名称:").grid(row=0, column=0, padx=5, pady=5, sticky=tk.W)
        self.param_name_entry = ttk.Entry(param_ops_frame, width=20)
        self.param_name_entry.grid(row=0, column=1, padx=5, pady=5)

        ttk.Label(param_ops_frame, text="最小值:").grid(row=0, column=2, padx=5, pady=5, sticky=tk.W)
        self.param_min_entry = ttk.Entry(param_ops_frame, width=15)
        self.param_min_entry.grid(row=0, column=3, padx=5, pady=5)

        ttk.Label(param_ops_frame, text="最大值:").grid(row=0, column=4, padx=5, pady=5, sticky=tk.W)
        self.param_max_entry = ttk.Entry(param_ops_frame, width=15)
        self.param_max_entry.grid(row=0, column=5, padx=5, pady=5)

        # Precision entry (step size of the parameter).
        ttk.Label(param_ops_frame, text="精度:").grid(row=0, column=6, padx=5, pady=5, sticky=tk.W)
        self.param_precision_entry = ttk.Entry(param_ops_frame, width=15)
        self.param_precision_entry.insert(0, "1.0")  # default precision is 1.0
        self.param_precision_entry.grid(row=0, column=7, padx=5, pady=5)

        ttk.Label(param_ops_frame, text="类型:").grid(row=0, column=8, padx=5, pady=5, sticky=tk.W)
        # Default parameter type is "int" (changed from "float").
        self.param_type_var = tk.StringVar(value="int")
        type_frame = ttk.Frame(param_ops_frame)
        type_frame.grid(row=0, column=9, padx=5, pady=5)
        ttk.Radiobutton(type_frame, text="浮点", variable=self.param_type_var, value="float").pack(side=tk.LEFT)
        ttk.Radiobutton(type_frame, text="整数", variable=self.param_type_var, value="int").pack(side=tk.LEFT)

        # Action buttons.
        button_frame = ttk.Frame(self.param_frame)
        button_frame.pack(fill=tk.X, padx=10, pady=5)

        ttk.Button(button_frame, text="添加参数", command=self.add_parameter).pack(side=tk.LEFT, padx=5)
        ttk.Button(button_frame, text="编辑选中参数", command=self.edit_parameter).pack(side=tk.LEFT, padx=5)
        ttk.Button(button_frame, text="删除选中参数", command=self.delete_parameter).pack(side=tk.LEFT, padx=5)
        ttk.Button(button_frame, text="清空参数", command=self.clear_parameters).pack(side=tk.LEFT, padx=5)
        # NOTE(review): ".!notebook" is Tk's auto-generated widget path; it
        # breaks if any widget is created before the notebook - consider
        # storing the notebook on self instead.
        ttk.Button(button_frame, text="下一步", command=lambda: self.root.nametowidget(".!notebook").select(1)).pack(
            side=tk.RIGHT, padx=5)

    def setup_optimization_frame(self):
        """Build the "optimization settings" tab: type, experiment, algorithm, mode."""
        # Optimization-type selection.
        opt_type_frame = ttk.LabelFrame(self.optimization_frame, text="优化类型")
        opt_type_frame.pack(fill=tk.X, padx=10, pady=10)

        self.opt_type_var = tk.StringVar(value="single")
        # Re-render the algorithm-parameter panel whenever the type changes.
        self.opt_type_var.trace_add("write", self.update_algorithm_parameters)

        ttk.Radiobutton(opt_type_frame, text="单次优化 (EI)", variable=self.opt_type_var, value="single").pack(
            side=tk.LEFT, padx=10, pady=5)

        batch_frame = ttk.Frame(opt_type_frame)
        batch_frame.pack(side=tk.LEFT, padx=10, pady=5)

        self.batch_ei_radio = ttk.Radiobutton(batch_frame, text="批量优化 (q-EI)", variable=self.opt_type_var,
                                              value="batch_ei")
        self.batch_ei_radio.pack(side=tk.LEFT)

        self.batch_nei_radio = ttk.Radiobutton(batch_frame, text="批量优化 (q-NEI)", variable=self.opt_type_var,
                                               value="batch_nei")
        self.batch_nei_radio.pack(side=tk.LEFT, padx=5)

        # Batch modes require BoTorch; disable them when the import failed.
        if not BOTOCH_OK:
            self.batch_ei_radio.config(state=tk.DISABLED)
            self.batch_nei_radio.config(state=tk.DISABLED)
            ttk.Label(batch_frame, text="(需要BoTorch支持)", foreground="red").pack(side=tk.LEFT, padx=5)

        ttk.Label(batch_frame, text="批量大小:").pack(side=tk.LEFT, padx=5)
        self.batch_size_var = tk.StringVar(value=str(DEFAULT_BATCH_SIZE))
        self.batch_size_entry = ttk.Entry(batch_frame, textvariable=self.batch_size_var, width=5)
        self.batch_size_entry.pack(side=tk.LEFT, padx=5)

        # Experiment settings.
        experiment_frame = ttk.LabelFrame(self.optimization_frame, text="实验设置")
        experiment_frame.pack(fill=tk.X, padx=10, pady=10)

        ttk.Label(experiment_frame, text="初始采样点数:").grid(row=0, column=0, padx=10, pady=5, sticky=tk.W)
        self.init_samples_var = tk.StringVar(value="5")
        ttk.Entry(experiment_frame, textvariable=self.init_samples_var, width=10).grid(row=0, column=1, padx=5, pady=5)

        # Container for the algorithm parameters; repopulated dynamically.
        self.algorithm_frame = ttk.LabelFrame(self.optimization_frame, text="优化算法参数")
        self.algorithm_frame.pack(fill=tk.X, padx=10, pady=10)

        # Backing variables for the algorithm parameters.
        self.mc_samples_var = tk.StringVar(value=str(DEFAULT_MC_SAMPLES))
        self.gp_restarts_var = tk.StringVar(value=str(DEFAULT_GP_RESTARTS))
        self.ei_candidates_var = tk.StringVar(value=str(DEFAULT_EI_CANDIDATES))
        self.ei_top_k_var = tk.StringVar(value=str(DEFAULT_EI_TOP_K))
        self.ei_local_restarts_var = tk.StringVar(value=str(DEFAULT_EI_LOCAL_RESTARTS))
        self.joint_num_restarts_var = tk.StringVar(value=str(DEFAULT_JOINT_NUM_RESTARTS))
        self.joint_raw_samples_var = tk.StringVar(value=str(DEFAULT_JOINT_RAW_SAMPLES))

        # Initial render of the parameter panel.
        self.update_algorithm_parameters()

        # Evaluation mode.
        mode_frame = ttk.LabelFrame(self.optimization_frame, text="评估模式")
        mode_frame.pack(fill=tk.X, padx=10, pady=10)

        ttk.Radiobutton(mode_frame, text="自动评估 (使用目标函数)", variable=self.mode_var, value="1").pack(
            side=tk.LEFT, padx=10, pady=5)
        ttk.Radiobutton(mode_frame, text="手动输入结果", variable=self.mode_var, value="2").pack(side=tk.LEFT, padx=10,
                                                                                                 pady=5)

        # Navigation buttons.
        button_frame = ttk.Frame(self.optimization_frame)
        button_frame.pack(side=tk.BOTTOM, fill=tk.X, padx=10, pady=10)

        ttk.Button(button_frame, text="上一步", command=lambda: self.root.nametowidget(".!notebook").select(0)).pack(
            side=tk.LEFT, padx=5)
        ttk.Button(button_frame, text="下一步", command=lambda: self.root.nametowidget(".!notebook").select(2)).pack(
            side=tk.RIGHT, padx=5)

    def setup_stop_conditions_frame(self):
        """Build the "stop conditions" tab: limits, thresholds, tolerances."""
        # Explanatory header.
        desc_frame = ttk.Frame(self.stop_conditions_frame)
        desc_frame.pack(fill=tk.X, padx=10, pady=10)
        ttk.Label(desc_frame, text="优化将在以下任一条件满足时停止：", font=("Arial", 10, "bold")).pack(anchor=tk.W)

        # Stop-condition settings.
        conditions_frame = ttk.LabelFrame(self.stop_conditions_frame, text="停止条件设置")
        conditions_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=5)

        # Maximum iteration count.
        ttk.Label(conditions_frame, text="最大迭代次数:").grid(row=0, column=0, padx=10, pady=10, sticky=tk.W)
        self.max_iter_var = tk.StringVar(value=str(DEFAULT_MAX_ITERATIONS))
        ttk.Entry(conditions_frame, textvariable=self.max_iter_var, width=10).grid(row=0, column=1, padx=5, pady=10)
        ttk.Label(conditions_frame, text="达到此迭代次数后停止").grid(row=0, column=2, padx=10, pady=10, sticky=tk.W)

        # Consecutive no-improvement iterations (default raised to 10).
        ttk.Label(conditions_frame, text="连续无改进迭代次数:").grid(row=1, column=0, padx=10, pady=10, sticky=tk.W)
        self.no_improvement_var = tk.StringVar(value=str(DEFAULT_NO_IMPROVEMENT_ITERATIONS))
        ttk.Entry(conditions_frame, textvariable=self.no_improvement_var, width=10).grid(row=1, column=1, padx=5,
                                                                                         pady=10)
        ttk.Label(conditions_frame, text="连续多次无改进后停止").grid(row=1, column=2, padx=10, pady=10, sticky=tk.W)

        # Repeated-suggestion threshold (default changed to 4).
        ttk.Label(conditions_frame, text="建议点重复次数阈值:").grid(row=2, column=0, padx=10, pady=10, sticky=tk.W)
        ttk.Entry(conditions_frame, textvariable=self.repeat_stop_threshold, width=10).grid(row=2, column=1, padx=5,
                                                                                            pady=10)
        ttk.Label(conditions_frame, text="当最优解点被建议多次后停止").grid(row=2, column=2, padx=10, pady=10,
                                                                            sticky=tk.W)

        # Target-threshold stop (optional, toggled by checkbox).
        threshold_frame = ttk.Frame(conditions_frame)
        threshold_frame.grid(row=3, column=0, columnspan=3, sticky=tk.W, padx=10, pady=10)

        ttk.Checkbutton(threshold_frame, text="使用目标阈值停止", variable=self.use_target_threshold,
                        command=self.update_stop_condition_fields).pack(side=tk.LEFT, padx=5)

        self.target_threshold_entry = ttk.Entry(threshold_frame, width=15, state=tk.DISABLED)
        if DEFAULT_TARGET_THRESHOLD is not None:
            self.target_threshold_entry.insert(0, str(DEFAULT_TARGET_THRESHOLD))
        self.target_threshold_entry.pack(side=tk.LEFT, padx=5)

        ttk.Label(threshold_frame, text="当目标值超过此阈值时停止").pack(side=tk.LEFT, padx=10)

        # Relative-tolerance stop (optional, toggled by checkbox).
        tolerance_frame = ttk.Frame(conditions_frame)
        tolerance_frame.grid(row=4, column=0, columnspan=3, sticky=tk.W, padx=10, pady=10)

        ttk.Checkbutton(tolerance_frame, text="使用相对容差停止", variable=self.use_relative_tolerance,
                        command=self.update_stop_condition_fields).pack(side=tk.LEFT, padx=5)

        self.relative_tolerance_entry = ttk.Entry(tolerance_frame, width=15, state=tk.DISABLED)
        self.relative_tolerance_entry.insert(0, str(DEFAULT_RELATIVE_TOLERANCE))
        self.relative_tolerance_entry.pack(side=tk.LEFT, padx=5)

        ttk.Label(tolerance_frame, text="当连续改进小于此比例时停止").pack(side=tk.LEFT, padx=10)

        # Navigation buttons.
        button_frame = ttk.Frame(self.stop_conditions_frame)
        button_frame.pack(side=tk.BOTTOM, fill=tk.X, padx=10, pady=10)

        ttk.Button(button_frame, text="上一步", command=lambda: self.root.nametowidget(".!notebook").select(1)).pack(
            side=tk.LEFT, padx=5)
        ttk.Button(button_frame, text="开始优化", command=self.start_optimization).pack(side=tk.RIGHT, padx=5)

        # Sync the entry widgets' enabled/disabled state with the checkboxes.
        self.update_stop_condition_fields()

    def update_stop_condition_fields(self):
        """更新停止条件输入框的状态"""
        if self.use_target_threshold.get():
            self.target_threshold_entry.config(state=tk.NORMAL)
        else:
            self.target_threshold_entry.config(state=tk.DISABLED)

        if self.use_relative_tolerance.get():
            self.relative_tolerance_entry.config(state=tk.NORMAL)
        else:
            self.relative_tolerance_entry.config(state=tk.DISABLED)

    def update_algorithm_parameters(self, *args):
        """Rebuild the algorithm-parameter widgets for the selected optimization type.

        Also used as a trace callback on ``opt_type_var`` (hence ``*args``).
        """
        # Clear every existing widget from the container.
        for widget in self.algorithm_frame.winfo_children():
            widget.destroy()

        opt_type = self.opt_type_var.get()
        row = 0

        # Parameter shared by every optimization type.
        ttk.Label(self.algorithm_frame, text="GP优化重启次数:").grid(row=row, column=0, padx=10, pady=5, sticky=tk.W)
        ttk.Entry(self.algorithm_frame, textvariable=self.gp_restarts_var, width=10).grid(row=row, column=1, padx=5,
                                                                                          pady=5)
        row += 1

        # Type-specific parameters.
        if opt_type == "single":
            # Single-point EI parameters.
            ttk.Label(self.algorithm_frame, text="EI候选点数:").grid(row=row, column=0, padx=10, pady=5, sticky=tk.W)
            ttk.Entry(self.algorithm_frame, textvariable=self.ei_candidates_var, width=10).grid(row=row, column=1,
                                                                                                padx=5, pady=5)
            row += 1

            ttk.Label(self.algorithm_frame, text="EI顶级候选数:").grid(row=row, column=0, padx=10, pady=5, sticky=tk.W)
            ttk.Entry(self.algorithm_frame, textvariable=self.ei_top_k_var, width=10).grid(row=row, column=1, padx=5,
                                                                                           pady=5)
            row += 1

            ttk.Label(self.algorithm_frame, text="EI局部重启次数:").grid(row=row, column=0, padx=10, pady=5,
                                                                         sticky=tk.W)
            ttk.Entry(self.algorithm_frame, textvariable=self.ei_local_restarts_var, width=10).grid(row=row, column=1,
                                                                                                    padx=5, pady=5)
            row += 1

        else:  # batch optimization (q-EI / q-NEI)
            # Shared batch-optimization parameters.
            ttk.Label(self.algorithm_frame, text="蒙特卡洛采样次数:").grid(row=row, column=0, padx=10, pady=5,
                                                                           sticky=tk.W)
            ttk.Entry(self.algorithm_frame, textvariable=self.mc_samples_var, width=10).grid(row=row, column=1, padx=5,
                                                                                             pady=5)
            row += 1

            # Joint acquisition-optimization parameters.
            ttk.Label(self.algorithm_frame, text="联合优化重启次数:").grid(row=row, column=0, padx=10, pady=5,
                                                                           sticky=tk.W)
            ttk.Entry(self.algorithm_frame, textvariable=self.joint_num_restarts_var, width=10).grid(row=row, column=1,
                                                                                                     padx=5, pady=5)
            row += 1

            ttk.Label(self.algorithm_frame, text="联合优化原始样本数:").grid(row=row, column=0, padx=10, pady=5,
                                                                             sticky=tk.W)
            ttk.Entry(self.algorithm_frame, textvariable=self.joint_raw_samples_var, width=10).grid(row=row, column=1,
                                                                                                    padx=5, pady=5)
            row += 1

            # Algorithm description label.
            if opt_type == "batch_nei":
                ttk.Label(self.algorithm_frame, text="(使用q-NEI算法，适用于带噪声的目标函数)", foreground="blue").grid(
                    row=row, column=0, columnspan=2, padx=10, pady=5, sticky=tk.W)
            else:
                ttk.Label(self.algorithm_frame, text="(使用q-EI算法，适用于无噪声的目标函数)", foreground="green").grid(
                    row=row, column=0, columnspan=2, padx=10, pady=5, sticky=tk.W)
            row += 1

    def setup_process_frame(self):
        """Build the "optimization process" tab: log view, status row, stop button."""
        # Log area.
        log_frame = ttk.LabelFrame(self.process_frame, text="优化日志")
        log_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

        self.log_text = scrolledtext.ScrolledText(log_frame, wrap=tk.WORD, state=tk.DISABLED)
        self.log_text.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)

        # Current-status display.
        status_frame = ttk.LabelFrame(self.process_frame, text="当前状态")
        status_frame.pack(fill=tk.X, padx=10, pady=5)

        ttk.Label(status_frame, text="当前最佳值:").grid(row=0, column=0, padx=10, pady=5, sticky=tk.W)
        self.best_value_var = tk.StringVar(value="--")
        ttk.Label(status_frame, textvariable=self.best_value_var, font=("Arial", 10, "bold")).grid(row=0, column=1,
                                                                                                   padx=5, pady=5,
                                                                                                   sticky=tk.W)

        ttk.Label(status_frame, text="迭代进度:").grid(row=0, column=2, padx=10, pady=5, sticky=tk.W)
        self.progress_var = tk.StringVar(value="0/0")
        ttk.Label(status_frame, textvariable=self.progress_var).grid(row=0, column=3, padx=5, pady=5, sticky=tk.W)

        # Stop-condition status display.
        ttk.Label(status_frame, text="停止条件状态:").grid(row=0, column=4, padx=10, pady=5, sticky=tk.W)
        self.stop_condition_status = tk.StringVar(value="监控中...")
        ttk.Label(status_frame, textvariable=self.stop_condition_status).grid(row=0, column=5, padx=5, pady=5,
                                                                              sticky=tk.W)

        # Stop button (enabled only while an optimization run is active).
        button_frame = ttk.Frame(self.process_frame)
        button_frame.pack(fill=tk.X, padx=10, pady=5)

        self.stop_button = ttk.Button(button_frame, text="停止优化", command=self.stop_optimization, state=tk.DISABLED)
        self.stop_button.pack(side=tk.RIGHT, padx=5)

    def setup_results_frame(self):
        """Build the "results analysis" tab: text summaries on the left, plots on the right."""
        # Horizontal PanedWindow for a resizable two-column layout.
        main_paned = ttk.PanedWindow(self.results_frame, orient=tk.HORIZONTAL)
        main_paned.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

        # Left column: best parameters and statistics.
        left_frame = ttk.Frame(main_paned, width=350)
        main_paned.add(left_frame, weight=1)

        # Best parameters.
        ttk.Label(left_frame, text="最优参数:", font=("Arial", 10, "bold")).pack(anchor=tk.W, padx=5, pady=5)
        self.best_params_text = scrolledtext.ScrolledText(left_frame, wrap=tk.WORD, state=tk.DISABLED, height=10)
        self.best_params_text.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)

        # Optimization statistics.
        ttk.Label(left_frame, text="优化统计:", font=("Arial", 10, "bold")).pack(anchor=tk.W, padx=5, pady=5)
        self.stats_text = scrolledtext.ScrolledText(left_frame, wrap=tk.WORD, state=tk.DISABLED, height=10)
        self.stats_text.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)

        # Reason the run stopped.
        ttk.Label(left_frame, text="停止原因:", font=("Arial", 10, "bold")).pack(anchor=tk.W, padx=5, pady=5)
        self.stop_reason_text = scrolledtext.ScrolledText(left_frame, wrap=tk.WORD, state=tk.DISABLED, height=3)
        self.stop_reason_text.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)

        # Right column: convergence curve and acquisition-value plots only.
        right_frame = ttk.Frame(main_paned)
        main_paned.add(right_frame, weight=3)  # give the plots more room

        # Container for the matplotlib canvas.
        conv_chart_container = ttk.Frame(right_frame)
        conv_chart_container.pack(fill=tk.BOTH, expand=True, padx=5, pady=5)

        # Matplotlib figure: convergence curve and acquisition values.
        self.fig = Figure(figsize=(10, 6), dpi=100)

        # 1. Convergence-curve plot (top).
        self.ax1 = self.fig.add_subplot(211)
        self.ax1.set_title('目标值收敛曲线')
        self.ax1.set_xlabel('迭代次数')
        self.ax1.set_ylabel('最佳目标值')
        self.ax1.grid(True, alpha=0.3)

        # 2. Acquisition-value plot (bottom).
        self.ax2 = self.fig.add_subplot(212)
        self.ax2.set_title('采集函数值变化')
        self.ax2.set_xlabel('迭代次数')
        self.ax2.set_ylabel('EI/q-EI值')
        self.ax2.grid(True, alpha=0.3)

        self.fig.tight_layout()

        # Embed the figure in a Tk canvas.
        self.canvas = tkagg.FigureCanvasTkAgg(self.fig, master=conv_chart_container)
        self.canvas.draw()
        self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)

        # Mark the figure as created.
        self.fig_initialized = True

        # Navigation buttons.
        button_frame = ttk.Frame(self.results_frame)
        button_frame.pack(fill=tk.X, padx=10, pady=5)

        ttk.Button(button_frame, text="返回参数配置",
                   command=lambda: self.root.nametowidget(".!notebook").select(0)).pack(side=tk.LEFT, padx=5)
        ttk.Button(button_frame, text="重新优化", command=self.restart_optimization).pack(side=tk.RIGHT, padx=5)

    # Parameter management - includes precision handling
    def add_parameter(self):
        """Read the entry widgets, validate them, and append a new parameter.

        On any validation error a messagebox is shown and no state changes.
        """
        try:
            name = self.param_name_entry.get().strip()
            min_val = float(self.param_min_entry.get())
            max_val = float(self.param_max_entry.get())
            precision = float(self.param_precision_entry.get())  # step size
            ptype = self.param_type_var.get()

            # Precision must be a positive step size.
            if precision <= 0:
                messagebox.showerror("错误", "精度必须大于0")
                return

            # For integer parameters, (max - min) must be an integer multiple
            # of the precision.  Compare against the nearest multiple instead
            # of `range % precision`: float modulo can return a value just
            # below `precision` for exact multiples (e.g. 10 % 0.1 == 0.0999…),
            # which made the original check reject valid inputs.
            if ptype == "int":
                range_val = max_val - min_val
                nearest_multiple = round(range_val / precision) * precision
                if not np.isclose(range_val, nearest_multiple, atol=1e-9):
                    messagebox.showerror("错误", f"对于整数类型，(最大值 - 最小值) 必须是精度 {precision} 的整数倍")
                    return

            # Auto-generate a name when the field is left blank.
            if not name:
                name = f"param_{len(self.param_names) + 1}"

            if max_val <= min_val:
                messagebox.showerror("错误", "最大值必须大于最小值")
                return

            # Reject duplicate parameter names.
            if name in self.param_names:
                messagebox.showerror("错误", f"参数名 '{name}' 已存在")
                return

            # Append to the parallel configuration lists.
            self.param_names.append(name)
            self.param_bounds.append((min_val, max_val))
            self.param_precisions.append(precision)
            self.param_types.append(ParameterType.INT if ptype == "int" else ParameterType.FLOAT)

            # Refresh the table.
            self.update_param_tree()

            # Reset the entry widgets for the next parameter.
            self.param_name_entry.delete(0, tk.END)
            self.param_min_entry.delete(0, tk.END)
            self.param_max_entry.delete(0, tk.END)
            self.param_precision_entry.delete(0, tk.END)
            self.param_precision_entry.insert(0, "1.0")  # restore default precision
            # Keep "int" selected as the default type.
            self.param_type_var.set("int")

        except ValueError:
            messagebox.showerror("错误", "请输入有效的数值")
        except Exception as e:
            messagebox.showerror("错误", f"添加参数失败: {str(e)}")

    def edit_parameter(self):
        """Overwrite the selected parameter with the values from the entry widgets.

        On any validation error a messagebox is shown and no state changes.
        """
        selected = self.param_tree.selection()
        if not selected:
            messagebox.showinfo("提示", "请先选择要编辑的参数")
            return

        try:
            # Row values start with the 1-based index shown in the table.
            index = int(self.param_tree.item(selected[0])['values'][0]) - 1
            old_name = self.param_names[index]

            name = self.param_name_entry.get().strip()
            min_val = float(self.param_min_entry.get())
            max_val = float(self.param_max_entry.get())
            precision = float(self.param_precision_entry.get())  # step size
            ptype = self.param_type_var.get()

            # Precision must be a positive step size.
            if precision <= 0:
                messagebox.showerror("错误", "精度必须大于0")
                return

            # For integer parameters, (max - min) must be an integer multiple
            # of the precision.  Compare against the nearest multiple instead
            # of `range % precision`: float modulo can return a value just
            # below `precision` for exact multiples (e.g. 10 % 0.1 == 0.0999…),
            # which made the original check reject valid inputs.
            if ptype == "int":
                range_val = max_val - min_val
                nearest_multiple = round(range_val / precision) * precision
                if not np.isclose(range_val, nearest_multiple, atol=1e-9):
                    messagebox.showerror("错误", f"对于整数类型，(最大值 - 最小值) 必须是精度 {precision} 的整数倍")
                    return

            # Keep the old name when the field is left blank.
            if not name:
                name = old_name

            if max_val <= min_val:
                messagebox.showerror("错误", "最大值必须大于最小值")
                return

            # Reject a rename that collides with another parameter.
            if name != old_name and name in self.param_names:
                messagebox.showerror("错误", f"参数名 '{name}' 已存在")
                return

            # Update the parallel configuration lists in place.
            self.param_names[index] = name
            self.param_bounds[index] = (min_val, max_val)
            self.param_precisions[index] = precision
            self.param_types[index] = ParameterType.INT if ptype == "int" else ParameterType.FLOAT

            # Refresh the table.
            self.update_param_tree()

        except ValueError:
            messagebox.showerror("错误", "请输入有效的数值")
        except Exception as e:
            messagebox.showerror("错误", f"编辑参数失败: {str(e)}")

    def delete_parameter(self):
        selected = self.param_tree.selection()
        if not selected:
            messagebox.showinfo("提示", "请先选择要删除的参数")
            return

        try:
            index = int(self.param_tree.item(selected[0])['values'][0]) - 1

            # 从列表中删除
            del self.param_names[index]
            del self.param_bounds[index]
            del self.param_precisions[index]  # 删除精度
            del self.param_types[index]

            # 更新表格
            self.update_param_tree()

        except Exception as e:
            messagebox.showerror("错误", f"删除参数失败: {str(e)}")

    def clear_parameters(self):
        """After user confirmation, drop every configured parameter."""
        if not messagebox.askyesno("确认", "确定要清空所有参数吗?"):
            return
        self.param_names = []
        self.param_bounds = []
        self.param_precisions = []
        self.param_types = []
        self.update_param_tree()

    def update_param_tree(self):
        """Rebuild the parameter table from the current configuration lists."""
        # Remove every existing row.
        for row_id in self.param_tree.get_children():
            self.param_tree.delete(row_id)

        # Re-insert one row per parameter, including its precision.
        rows = zip(self.param_names, self.param_bounds,
                   self.param_precisions, self.param_types)
        for idx, (name, (lo, hi), step, ptype) in enumerate(rows, start=1):
            kind = "整数" if ptype == ParameterType.INT else "浮点"
            self.param_tree.insert(
                "", tk.END,
                values=(idx, name, f"{lo:.6f}", f"{hi:.6f}", f"{step:.6f}", kind))

    # 日志函数
    def log(self, message):
        self.root.after(0, lambda: self._append_log(message))

    def _append_log(self, message):
        self.log_text.config(state=tk.NORMAL)
        self.log_text.insert(tk.END, message + "\n")
        self.log_text.see(tk.END)
        self.log_text.config(state=tk.DISABLED)

    # Optimization control
    def start_optimization(self):
        # 检查参数是否已设置
        if not self.param_names:
            messagebox.showerror("错误", "请先设置参数")
            return

        # 检查批量大小
        try:
            self.opt_type = self.opt_type_var.get()
            if self.opt_type in ["batch_ei", "batch_nei"]:
                self.batch_size = int(self.batch_size_var.get())
                if self.batch_size < 2:
                    messagebox.showerror("错误", "批量大小必须大于等于2")
                    return

                if not BOTOCH_OK:
                    messagebox.showerror("错误", "批量优化需要BoTorch支持")
                    return

        except ValueError:
            messagebox.showerror("错误", "请输入有效的批量大小")
            return

        # 检查初始采样点数
        try:
            self.n_init = int(self.init_samples_var.get())
            if self.n_init < 1:
                messagebox.showerror("错误", "初始采样点数必须大于0")
                return
        except ValueError:
            messagebox.showerror("错误", "请输入有效的初始采样点数")
            return

        # 获取停止条件参数
        try:
            self.max_iterations = int(self.max_iter_var.get())
            self.no_improvement_iterations = int(self.no_improvement_var.get())
            self.repeat_threshold = self.repeat_stop_threshold.get()

            if self.max_iterations < 1:
                messagebox.showerror("错误", "最大迭代次数必须大于0")
                return

            if self.no_improvement_iterations < 1:
                messagebox.showerror("错误", "连续无改进迭代次数必须大于0")
                return

            if self.repeat_threshold < 1:
                messagebox.showerror("错误", "建议点重复次数阈值必须大于0")
                return

            self.target_threshold = None
            if self.use_target_threshold.get():
                self.target_threshold = float(self.target_threshold_entry.get())

            self.relative_tolerance = None
            if self.use_relative_tolerance.get():
                self.relative_tolerance = float(self.relative_tolerance_entry.get())
                if self.relative_tolerance <= 0 or self.relative_tolerance >= 1:
                    messagebox.showerror("错误", "相对容差必须在(0, 1)范围内")
                    return

        except ValueError:
            messagebox.showerror("错误", "请输入有效的停止条件参数")
            return

        # 获取优化算法参数
        try:
            # 公共参数
            self.gp_restarts = int(self.gp_restarts_var.get())

            # 根据优化类型获取相应参数
            if self.opt_type == "single":
                self.ei_candidates = int(self.ei_candidates_var.get())
                self.ei_top_k = int(self.ei_top_k_var.get())
                self.ei_local_restarts = int(self.ei_local_restarts_var.get())

                # 验证单次优化参数
                if self.ei_candidates < 1:
                    messagebox.showerror("错误", "EI候选点数必须大于0")
                    return
                if self.ei_top_k < 1:
                    messagebox.showerror("错误", "EI顶级候选数必须大于0")
                    return
                if self.ei_local_restarts < 1:
                    messagebox.showerror("错误", "EI局部重启次数必须大于0")
                    return
            else:
                self.mc_samples = int(self.mc_samples_var.get())
                self.joint_num_restarts = int(self.joint_num_restarts_var.get())
                self.joint_raw_samples = int(self.joint_raw_samples_var.get())

                # 验证批量优化参数
                if self.mc_samples < 1:
                    messagebox.showerror("错误", "蒙特卡洛采样次数必须大于0")
                    return
                if self.joint_num_restarts < 1:
                    messagebox.showerror("错误", "联合优化重启次数必须大于0")
                    return
                if self.joint_raw_samples < 1:
                    messagebox.showerror("错误", "联合优化原始样本数必须大于0")
                    return

            # 验证公共参数
            if self.gp_restarts < 1:
                messagebox.showerror("错误", "GP优化重启次数必须大于0")
                return

        except ValueError:
            messagebox.showerror("错误", "请输入有效的优化算法参数")
            return

        # 获取模式
        self.mode = self.mode_var.get()

        # 切换到过程标签页
        self.root.nametowidget(".!notebook").select(3)

        # 清空日志
        self.log_text.config(state=tk.NORMAL)
        self.log_text.delete(1.0, tk.END)
        self.log_text.config(state=tk.DISABLED)

        # 重置状态
        self.optimization_running = True
        self.stop_requested = False
        self.stop_button.config(state=tk.NORMAL)
        self.best_value_var.set("--")
        self.progress_var.set(f"0/{self.max_iterations}")
        self.stop_condition_status.set("监控中...")

        # 在新线程中运行优化
        threading.Thread(target=self.run_optimization, daemon=True).start()

    def stop_optimization(self):
        """Request a graceful stop after user confirmation; the worker thread polls the flag."""
        if not messagebox.askyesno("确认", "确定要停止优化吗?"):
            return
        self.stop_requested = True
        self.stop_button.config(text="停止中...", state=tk.DISABLED)
        self.stop_condition_status.set("用户请求停止")

    def restart_optimization(self):
        """Jump back to the configuration tab and kick off a fresh optimization run."""
        notebook = self.root.nametowidget(".!notebook")
        notebook.select(2)
        self.start_optimization()

    # 优化核心函数 - 增加精度相关转换
    # Core optimization routine — includes the precision-based scaling conversions.
    def run_optimization(self):
        """Worker-thread entry point: run the full Bayesian optimization loop.

        Performs Sobol initial sampling, then iterates GP fitting plus
        acquisition optimization (single-point EI via sklearn, or batch
        q-EI/q-NEI via BoTorch), evaluating the objective either automatically
        (mode "1") or through user-entered values, until one of the stop
        conditions fires. Parameters live in three coordinate systems:
        raw values, scaled values (raw / precision), and [0,1]-normalized
        values used by the GP. All GUI updates are marshalled to the main
        thread via ``root.after``. Results are recorded in the module-level
        ``optimization_history`` dict.
        """
        global optimization_history
        stop_reason = "未知原因"
        try:
            # Reset the history record, including precision-related fields.
            optimization_history = {
                'iteration': [],
                'best_value': [],
                'acquisition_value': [],
                'X_history': [],  # raw parameter values (after multiplying by precision)
                'X_scaled_history': [],  # scaled parameter values (after dividing by precision)
                'y_history': [],
                'param_names': self.param_names.copy(),
                'param_types': self.param_types.copy(),
                'param_precisions': self.param_precisions.copy(),  # per-parameter precisions
                'opt_type': 'single' if self.opt_type == 'single' else 'batch',
                # NOTE(review): assumes self.batch_size exists even in single mode —
                # confirm __init__ provides a default, else this raises AttributeError.
                'batch_size': self.batch_size,
                'suggested_points_history': [],  # every suggested point (raw values)
                'suggested_scaled_points_history': [],  # every suggested point (scaled values)
                'suggestion_counts': {}  # how many times each rounded point was suggested
            }

            d = len(self.param_names)
            bounds = np.array(self.param_bounds)
            precisions = np.array(self.param_precisions)  # per-parameter precision array

            # Compute the scaled parameter ranges (original bounds divided by precision).
            scaled_bounds = []
            for i in range(d):
                min_val, max_val = bounds[i]
                precision = precisions[i]
                scaled_min = min_val / precision
                scaled_max = max_val / precision
                scaled_bounds.append((scaled_min, scaled_max))
            scaled_bounds = np.array(scaled_bounds)

            self.log("=== 优化配置 ===")
            self.log(f"参数个数: {d}")
            self.log("参数配置:")
            for i, (name, (min_val, max_val), precision, ptype) in enumerate(
                    zip(self.param_names, self.param_bounds, self.param_precisions, self.param_types)):
                type_str = "整数" if ptype == ParameterType.INT else "浮点"
                scaled_min = min_val / precision
                scaled_max = max_val / precision
                self.log(f"  {i + 1}. {name}:")
                self.log(f"    原始范围: [{min_val}, {max_val}], 精度: {precision}, 类型: {type_str}")
                self.log(f"    缩放范围: [{scaled_min}, {scaled_max}] (原始值 / {precision})")

            if self.opt_type in ["batch_ei", "batch_nei"]:
                self.log(
                    f"优化类型: 批量优化 ({'q-NEI' if self.opt_type == 'batch_nei' else 'q-EI'}, batch_size={self.batch_size})")
            else:
                self.log(f"优化类型: 单次优化 (EI)")

            self.log(f"初始采样点数: {self.n_init}")
            self.log(f"评估模式: {'自动评估' if self.mode == '1' else '手动输入'}")
            self.log(f"设备: {DEVICE}")

            # Log the configured stop conditions.
            self.log("\n--- 停止条件 ---")
            self.log(f"最大迭代次数: {self.max_iterations}")
            self.log(f"连续无改进迭代次数: {self.no_improvement_iterations}")
            self.log(f"建议点重复次数阈值: {self.repeat_threshold}")
            if self.use_target_threshold.get():
                self.log(f"目标阈值: {self.target_threshold}")
            if self.use_relative_tolerance.get():
                self.log(f"相对容差: {self.relative_tolerance}")

            # Log the optimizer hyper-parameters.
            self.log("\n--- 优化算法参数 ---")
            self.log(f"GP优化重启次数: {self.gp_restarts}")

            if self.opt_type == "single":
                self.log(f"EI候选点数: {self.ei_candidates}")
                self.log(f"EI顶级候选数: {self.ei_top_k}")
                self.log(f"EI局部重启次数: {self.ei_local_restarts}")
            else:
                self.log(f"蒙特卡洛采样次数: {self.mc_samples}")
                self.log(f"联合优化重启次数: {self.joint_num_restarts}")
                self.log(f"联合优化原始样本数: {self.joint_raw_samples}")

            # Initial sampling stage (direct qmc.Sobol usage, no fallback).
            self.log("\n--- 初始采样阶段 ---")
            self.log(f"使用Sobol序列生成 {self.n_init} 个初始采样点")

            # Initialize the Sobol sampler.
            sampler = qmc.Sobol(d=d, scramble=True, seed=None)
            # random_base2 draws 2^m points for balance; m is chosen so at least
            # n_init points exist (it over-provisions, e.g. 32 for n_init=16).
            X0_scaled = sampler.random_base2(m=int(np.log2(self.n_init)) + 1)
            # Truncate if more samples were generated than needed.
            if len(X0_scaled) > self.n_init:
                X0_scaled = X0_scaled[:self.n_init]

            # Map the unit-cube samples onto the scaled parameter ranges.
            for i in range(d):
                X0_scaled[:, i] = X0_scaled[:, i] * (scaled_bounds[i, 1] - scaled_bounds[i, 0]) + scaled_bounds[i, 0]

            # Round integer-typed columns (in scaled space integers stay integers).
            X_scaled = self.round_for_types_scaled(X0_scaled, self.param_types)

            # Convert back to raw parameter values (multiply by precision).
            X = self.scaled_to_original(X_scaled, precisions)

            y = []

            # Evaluate every initial point, either automatically or via user input.
            for i, x in enumerate(X):
                if self.stop_requested:
                    raise Exception("优化已被用户停止")

                if self.mode == "1":
                    y_val = float(target_function(x))
                    self.log(f"初始点 {i + 1}: {self.display_params(x)} -> {y_val:.6f}")
                else:
                    self.log(f"初始点 {i + 1}: {self.display_params(x)}")
                    # Blocks this worker thread until the user answers the dialog.
                    y_val = self.get_user_input(f"请输入初始点 {i + 1} 的结果:")
                    if y_val is None:  # user cancelled
                        raise Exception("用户取消输入")

                y.append(y_val)

            y = np.array(y)

            # Record the initial history, including the scaled coordinates.
            optimization_history['X_history'] = X.tolist()
            optimization_history['X_scaled_history'] = X_scaled.tolist()
            optimization_history['y_history'] = y.tolist()

            no_improve_count = 0
            best_so_far = float(np.max(y))
            prev_best_values = [best_so_far]  # rolling window for relative-improvement checks
            self.root.after(0, lambda: self.best_value_var.set(f"{best_so_far:.6f}"))

            self.log(f"\n--- 贝叶斯优化阶段 ---")
            self.log(f"当前最佳值: {best_so_far:.6f}")

            # Main optimization loop.
            for it in range(1, self.max_iterations + 1):
                if self.stop_requested:
                    stop_reason = "用户请求停止"
                    raise Exception(stop_reason)

                self.log(f"\n=== 第 {it} 次迭代 ===")
                self.root.after(0, lambda i=it: self.progress_var.set(f"{i}/{self.max_iterations}"))

                # Normalize the scaled coordinates onto [0,1] for the GP.
                X_norm = self.normalize_array(X_scaled, scaled_bounds)

                if self.opt_type == "single":
                    # Single-point EI optimization on a sklearn GP.
                    gp = self.fit_gp_sklearn(X_norm, y, n_restarts=self.gp_restarts, random_state=it)
                    y_best_for_ei = np.max(y)
                    x_next_norm, ei_value = self.propose_location_single(
                        gp, np.array([(0.0, 1.0)] * d), y_best_for_ei,
                        n_candidates=self.ei_candidates,
                        top_k=self.ei_top_k,
                        n_local_restarts=self.ei_local_restarts
                    )

                    # Map the normalized point back to the scaled parameter range.
                    x_next_scaled = self.unnormalize_array(np.array(x_next_norm).reshape(1, -1), scaled_bounds).reshape(
                        -1)

                    # Round integer-typed parameters in scaled space.
                    x_next_scaled = self.round_for_types_scaled(x_next_scaled.reshape(1, -1), self.param_types).reshape(
                        -1)

                    # Convert back to raw parameter values (multiply by precision).
                    x_next = self.scaled_to_original(x_next_scaled.reshape(1, -1), precisions).reshape(-1)

                    # Check whether this suggestion duplicates a previous one.
                    found_repeat_at = -1
                    for hist_idx, past_point in enumerate(optimization_history['suggested_points_history']):
                        if np.allclose(x_next, np.array(past_point), atol=1e-6):
                            found_repeat_at = hist_idx + 1
                            break
                    if found_repeat_at != -1:
                        self.log(f"  -> 警告: 该建议点与历史第 {found_repeat_at} 个建议点重复。")

                    # Record the suggestion in both raw and scaled coordinates.
                    optimization_history['suggested_points_history'].append(x_next.tolist())
                    optimization_history['suggested_scaled_points_history'].append(x_next_scaled.tolist())

                    # Count suggestions keyed by the point rounded to 6 decimals.
                    point_tuple = tuple(np.round(x_next, 6))
                    optimization_history['suggestion_counts'][point_tuple] = optimization_history[
                                                                                 'suggestion_counts'].get(
                        point_tuple, 0) + 1

                    if self.mode == "1":
                        y_next = float(target_function(x_next))
                        self.log(f"Iter {it:03d}: {self.display_params(x_next)} -> {y_next:.6f}")
                    else:
                        self.log(f"Iter {it:03d}: {self.display_params(x_next)}")
                        y_next = self.get_user_input(f"请输入迭代 {it} 的结果:")
                        if y_next is None:  # user cancelled
                            stop_reason = "用户取消输入"
                            raise Exception(stop_reason)

                    X = np.vstack([X, x_next])
                    X_scaled = np.vstack([X_scaled, x_next_scaled])
                    y = np.append(y, y_next)

                    # Log the GP's posterior prediction at the suggested point.
                    mu_s, sigma_s = gp.predict(gp.X_scaler_.transform(x_next_norm.reshape(1, -1)), return_std=True)
                    self.log(f"    预测: μ={mu_s[0]:.6f}, σ={sigma_s[0]:.6f}, EI={ei_value:.6f}")

                    # Record the acquisition value for this iteration.
                    optimization_history['acquisition_value'].append(ei_value)

                else:
                    # Batch optimization (q-EI or q-NEI via BoTorch).
                    use_noisy = (self.opt_type == "batch_nei")
                    try:
                        X_next_norm, qei_value = self.propose_location_batch_botorch(
                            X_norm, y, np.array([(0.0, 1.0)] * d),
                            self.batch_size, use_noisy=use_noisy,
                            n_candidates=self.joint_raw_samples
                        )

                        # Map the normalized batch back to the scaled parameter range.
                        X_next_scaled = self.unnormalize_array(X_next_norm, scaled_bounds)

                        # Round integer-typed parameters in scaled space.
                        X_next_scaled = self.round_for_types_scaled(X_next_scaled, self.param_types)

                        # Convert back to raw parameter values (multiply by precision).
                        X_next = self.scaled_to_original(X_next_scaled, precisions)

                        self.log(f"Iter {it:03d} (批量 {self.batch_size} 点):")
                        self.log(f"    采集函数值(估计): {qei_value:.6f}")

                        # Check and record each suggested point individually.
                        for i, xx in enumerate(X_next):
                            self.log(f"  点 {i + 1}: {self.display_params(xx)}")
                            found_repeat_at = -1
                            for hist_idx, past_point in enumerate(optimization_history['suggested_points_history']):
                                if np.allclose(xx, np.array(past_point), atol=1e-6):
                                    found_repeat_at = hist_idx + 1
                                    break
                            if found_repeat_at != -1:
                                self.log(f"    -> 警告: 该建议点与历史第 {found_repeat_at} 个建议点重复。")

                            # Record the suggestion in both raw and scaled coordinates.
                            optimization_history['suggested_points_history'].append(xx.tolist())
                            optimization_history['suggested_scaled_points_history'].append(X_next_scaled[i].tolist())

                            point_tuple = tuple(np.round(xx, 6))
                            optimization_history['suggestion_counts'][point_tuple] = optimization_history[
                                                                                         'suggestion_counts'].get(
                                point_tuple,
                                0) + 1

                        # Evaluate the whole batch.
                        y_next_batch = []
                        if self.mode == "1":
                            for i, x in enumerate(X_next):
                                y_val = float(target_function(x))
                                y_next_batch.append(y_val)
                                self.log(f"  点{i + 1} 结果: {y_val:.6f}")
                        else:
                            for i, x in enumerate(X_next):
                                y_val = self.get_user_input(f"请输入迭代 {it} 点 {i + 1} 的结果:")
                                if y_val is None:  # user cancelled
                                    stop_reason = "用户取消输入"
                                    raise Exception(stop_reason)
                                y_next_batch.append(y_val)

                        X = np.vstack([X, X_next])
                        X_scaled = np.vstack([X_scaled, X_next_scaled])
                        y = np.append(y, y_next_batch)

                        # Record the acquisition value for this iteration.
                        optimization_history['acquisition_value'].append(qei_value)

                    except Exception as e:
                        # Best-effort: a failed batch proposal skips the iteration
                        # instead of aborting the whole run.
                        self.log(f"批量优化出错: {e}")
                        self.log("跳过此次迭代")
                        optimization_history['acquisition_value'].append(0.0)
                        continue

                # Update the shared history record.
                optimization_history['iteration'].append(it)
                optimization_history['best_value'].append(float(np.max(y)))
                optimization_history['X_history'] = X.tolist()
                optimization_history['X_scaled_history'] = X_scaled.tolist()
                optimization_history['y_history'] = y.tolist()

                # Update the running best (maximization).
                prev_best = best_so_far
                best_so_far = float(np.max(y))
                self.root.after(0, lambda val=best_so_far: self.best_value_var.set(f"{val:.6f}"))

                # Keep the last few best values for relative-improvement checks.
                prev_best_values.append(best_so_far)
                if len(prev_best_values) > 5:  # keep only the most recent 5
                    prev_best_values.pop(0)

                # Track whether this iteration improved the best value.
                improvement = best_so_far - prev_best
                if improvement > 1e-8:
                    no_improve_count = 0
                    self.log(f"    *** 找到更优解! 当前最佳: {best_so_far:.6f} (改进: {improvement:.8f}) ***")
                else:
                    no_improve_count += 1
                    self.log(f"    无明显改进. 当前最佳: {best_so_far:.6f} (连续无改进: {no_improve_count})")

                # How often has the current best point been suggested?
                best_idx_current = np.argmax(y)
                best_X_current = X[best_idx_current]
                best_X_tuple = tuple(np.round(best_X_current, 6))
                suggestion_count_for_best = optimization_history['suggestion_counts'].get(best_X_tuple, 0)

                # Evaluate the stop conditions in priority order.
                stop_condition_met = False

                # 1. Best point suggested too many times.
                if suggestion_count_for_best >= self.repeat_threshold:
                    stop_reason = f"当前最优解点已被建议 {suggestion_count_for_best} 次，超过阈值 {self.repeat_threshold}"
                    stop_condition_met = True

                # 2. Maximum iteration count reached.
                if not stop_condition_met and it >= self.max_iterations:
                    stop_reason = f"达到最大迭代次数 ({self.max_iterations})"
                    stop_condition_met = True

                # 3. Too many consecutive iterations without improvement.
                if not stop_condition_met and no_improve_count >= self.no_improvement_iterations:
                    stop_reason = f"连续 {self.no_improvement_iterations} 次无改进"
                    stop_condition_met = True

                # 4. Absolute target threshold reached (if enabled).
                if not stop_condition_met and self.use_target_threshold.get() and best_so_far >= self.target_threshold:
                    stop_reason = f"达到目标阈值 ({self.target_threshold})"
                    stop_condition_met = True

                # 5. Relative tolerance on the latest improvement (if enabled).
                if not stop_condition_met and self.use_relative_tolerance.get() and len(prev_best_values) >= 2:
                    # Relative change between the two most recent best values.
                    recent_change = abs(prev_best_values[-1] - prev_best_values[-2])
                    avg_value = (prev_best_values[-1] + prev_best_values[-2]) / 2

                    if avg_value == 0:
                        relative_change = 0.0
                    else:
                        relative_change = recent_change / abs(avg_value)

                    self.log(f"    相对改进: {relative_change:.8f} (容差: {self.relative_tolerance})")

                    if relative_change < self.relative_tolerance:
                        stop_reason = f"相对改进小于容差 ({relative_change:.8f} < {self.relative_tolerance})"
                        stop_condition_met = True

                # Either stop, or publish the current monitoring status.
                if stop_condition_met:
                    self.root.after(0, lambda reason=stop_reason: self.stop_condition_status.set(reason))
                    self.log(f"\n停止条件满足: {stop_reason}")
                    break
                else:
                    status_text = f"连续无改进: {no_improve_count}/{self.no_improvement_iterations}, "
                    status_text += f"最佳点建议次数: {suggestion_count_for_best}/{self.repeat_threshold}"
                    if self.use_relative_tolerance.get() and len(prev_best_values) >= 2:
                        status_text += f", 相对改进: {relative_change:.6f}"
                    self.root.after(0, lambda text=status_text: self.stop_condition_status.set(text))

            # Optimization finished — log the final result summary.
            best_idx = int(np.argmax(y))
            self.log(f"\n=== 优化完成 ===")
            self.log(f"停止原因: {stop_reason}")
            self.log("最优参数:")
            for name, val in zip(self.param_names, X[best_idx]):
                self.log(f"  {name} = {val:.6f}")
            self.log(f"最优值: {y[best_idx]:.6f}")
            self.log(f"总评估次数: {len(y)}")
            self.log(f"总迭代次数: {len(optimization_history['iteration'])}")

            # Refresh the results panel on the main thread.
            # NOTE(review): the finally block below schedules update_results
            # again, so on a successful run it is invoked twice — confirm
            # whether the duplicate refresh is intentional.
            self.root.after(0, lambda reason=stop_reason: self.update_results(reason))

            # Switch to the results tab.
            self.root.after(0, lambda: self.root.nametowidget(".!notebook").select(4))

        except Exception as e:
            self.log(f"\n优化中断: {str(e)}")
            if stop_reason == "未知原因":
                stop_reason = str(e)
        finally:
            self.optimization_running = False
            self.root.after(0, lambda: self.stop_button.config(text="停止优化", state=tk.DISABLED))
            # Even after an exception, still try to refresh the results panel.
            self.root.after(0, lambda reason=stop_reason: self.update_results(reason))

    def get_user_input(self, prompt):
        """Prompt the user for a numeric result via a modal dialog.

        Fix: the original created the Toplevel directly on the calling
        (worker) thread — Tkinter widgets must only be created and driven
        from the main thread. The dialog is now built via ``root.after``
        on the Tk main loop while the worker blocks on a ``threading.Event``
        until the user confirms, cancels, or closes the window.

        Args:
            prompt: message shown above the entry field.

        Returns:
            The entered value as float, or None if the user cancelled.
        """
        result = [None]           # holder so the dialog callbacks can write the value
        done = threading.Event()  # released once the dialog is dismissed

        def build_dialog():
            # Runs on the Tk main thread.
            dialog = tk.Toplevel(self.root)
            dialog.title("输入结果")
            dialog.geometry("300x120")
            dialog.transient(self.root)
            dialog.grab_set()  # modal

            def finish(value):
                result[0] = value
                dialog.destroy()
                done.set()

            def on_ok():
                try:
                    # Invalid input keeps the dialog open, as before.
                    finish(float(entry.get()))
                except ValueError:
                    messagebox.showerror("错误", "请输入有效的数值")

            def on_cancel():
                finish(None)

            # Closing the window counts as cancel; otherwise the worker
            # thread would block forever on the event.
            dialog.protocol("WM_DELETE_WINDOW", on_cancel)

            ttk.Label(dialog, text=prompt).pack(padx=10, pady=10)

            entry_frame = ttk.Frame(dialog)
            entry_frame.pack(fill=tk.X, padx=10, pady=5)

            entry = ttk.Entry(entry_frame)
            entry.pack(side=tk.LEFT, fill=tk.X, expand=True, padx=(0, 10))
            entry.focus()

            button_frame = ttk.Frame(dialog)
            button_frame.pack(fill=tk.X, padx=10, pady=10)

            ttk.Button(button_frame, text="确定", command=on_ok).pack(side=tk.RIGHT, padx=5)
            ttk.Button(button_frame, text="取消", command=on_cancel).pack(side=tk.RIGHT, padx=5)

        self.root.after(0, build_dialog)
        done.wait()  # block the worker thread until the dialog is resolved
        return result[0]

    def update_results(self, stop_reason):
        """Schedule the result-panel refresh on the Tk main thread (thread-safe)."""
        refresh = self._actual_update_results
        self.root.after(0, refresh, stop_reason)

    def _actual_update_results(self, stop_reason):
        """Perform the actual results refresh; must run on the Tk main thread.

        Renders, from the module-level ``optimization_history``: the best
        parameters, run statistics and a convergence heuristic, the stop
        reason, and the two matplotlib plots (convergence curve and
        acquisition-value curve). Errors are logged and the plots reset
        rather than propagated.
        """
        try:
            # Bail out if the figure widgets were never created.
            if not self.fig_initialized:
                self.log("图表尚未初始化，无法更新可视化结果")
                return

            # Bail out if there is no optimization history to show.
            if not optimization_history['y_history']:
                self.log("没有优化历史数据可供显示")
                return

            # Show the best parameters found (maximization: argmax of y).
            best_idx = np.argmax(optimization_history['y_history'])
            best_params = optimization_history['X_history'][best_idx]

            self.best_params_text.config(state=tk.NORMAL)
            self.best_params_text.delete(1.0, tk.END)
            for name, val in zip(optimization_history['param_names'], best_params):
                self.best_params_text.insert(tk.END, f"{name} = {val:.6f}\n")
            self.best_params_text.insert(tk.END, f"\n最优值: {optimization_history['y_history'][best_idx]:.6f}")
            self.best_params_text.config(state=tk.DISABLED)

            # Show run statistics.
            self.stats_text.config(state=tk.NORMAL)
            self.stats_text.delete(1.0, tk.END)
            self.stats_text.insert(tk.END, f"总评估次数: {len(optimization_history['y_history'])}\n")
            self.stats_text.insert(tk.END, f"迭代次数: {len(optimization_history['iteration'])}\n")
            self.stats_text.insert(tk.END, f"最差值: {np.min(optimization_history['y_history']):.6f}\n")
            self.stats_text.insert(tk.END,
                                   f"值域: {np.max(optimization_history['y_history']) - np.min(optimization_history['y_history']):.6f}\n\n")

            # Convergence analysis: mean of the positive improvements among
            # the last 5 iteration-to-iteration differences.
            best_values = np.array(optimization_history['best_value'])
            if len(best_values) > 5:
                last_5_improvements = np.diff(best_values[-6:])
                avg_recent_improvement = np.mean(last_5_improvements[last_5_improvements > 0]) if np.any(
                    last_5_improvements > 0) else 0
                self.stats_text.insert(tk.END, f"最近5次迭代平均改进: {avg_recent_improvement:.8f}\n")

            # Coefficient of variation over the last 10 best values as a
            # convergence heuristic (looser thresholds for batch runs).
            if len(best_values) > 10:
                recent_values = best_values[-10:]
                cv = np.std(recent_values) / np.mean(recent_values) if np.mean(recent_values) != 0 else 0
                self.stats_text.insert(tk.END, f"最近10次最佳值变异系数: {cv:.6f}\n")

                if optimization_history.get('opt_type', 'single') == 'batch':
                    if cv < 0.01:
                        self.stats_text.insert(tk.END, "✓ 批量优化已收敛（变异系数 < 0.01）\n")
                    elif cv < 0.05:
                        self.stats_text.insert(tk.END, "~ 批量优化接近收敛（变异系数 < 0.05）\n")
                    else:
                        self.stats_text.insert(tk.END, "✗ 批量优化尚未收敛，建议继续迭代\n")
                else:
                    if cv < 0.001:
                        self.stats_text.insert(tk.END, "✓ 优化已收敛（变异系数 < 0.001）\n")
                    elif cv < 0.01:
                        self.stats_text.insert(tk.END, "~ 优化接近收敛（变异系数 < 0.01）\n")
                    else:
                        self.stats_text.insert(tk.END, "✗ 优化尚未收敛，建议继续迭代\n")

            # Suggested-point statistics: most frequently suggested point.
            if optimization_history['suggestion_counts']:
                most_suggested = max(optimization_history['suggestion_counts'].items(), key=lambda x: x[1])
                self.stats_text.insert(tk.END, f"\n最常被建议的点: {most_suggested[1]} 次\n")

            self.stats_text.config(state=tk.DISABLED)

            # Show the stop reason.
            self.stop_reason_text.config(state=tk.NORMAL)
            self.stop_reason_text.delete(1.0, tk.END)
            self.stop_reason_text.insert(tk.END, stop_reason)
            self.stop_reason_text.config(state=tk.DISABLED)

            # Redraw the two plots: convergence curve and acquisition values.
            self.ax1.clear()
            self.ax2.clear()

            # Convergence curve (best value vs. iteration).
            self.ax1.plot(optimization_history['iteration'], optimization_history['best_value'], 'b-o', linewidth=2,
                          markersize=4)
            self.ax1.set_title('目标值收敛曲线')
            self.ax1.set_xlabel('迭代次数')
            self.ax1.set_ylabel('最佳目标值')
            self.ax1.grid(True, alpha=0.3)

            # Reference line at the final best value.
            if optimization_history['best_value']:
                final_best = optimization_history['best_value'][-1]
                self.ax1.axhline(y=final_best, color='r', linestyle='--', alpha=0.7,
                                 label=f'最终最佳值: {final_best:.6f}')

                # If a target threshold was used, draw it too.
                if self.use_target_threshold.get() and self.target_threshold is not None:
                    self.ax1.axhline(y=self.target_threshold, color='g', linestyle='-.', alpha=0.7,
                                     label=f'目标阈值: {self.target_threshold:.6f}')

                self.ax1.legend()

            # Acquisition value (EI / q-EI) per iteration.
            self.ax2.plot(optimization_history['iteration'], optimization_history['acquisition_value'], 'g-o',
                          linewidth=2,
                          markersize=4)
            self.ax2.set_title('采集函数值变化')
            self.ax2.set_xlabel('迭代次数')
            self.ax2.set_ylabel('EI/q-EI值')
            self.ax2.grid(True, alpha=0.3)

            self.fig.tight_layout()
            self.canvas.draw()

        except Exception as e:
            self.log(f"更新结果时出错: {str(e)}")
            # Best-effort: reset the plots to a labelled "failed" state.
            if hasattr(self, 'ax1') and hasattr(self, 'ax2'):
                try:
                    self.ax1.clear()
                    self.ax2.clear()
                    self.ax1.set_title('目标值收敛曲线（加载失败）')
                    self.ax2.set_title('采集函数值变化（加载失败）')
                    self.canvas.draw()
                except:
                    pass

    # 工具函数 - 增加精度相关转换
    def display_params(self, param_values):
        """Format parameter values as a comma-separated 'name=value' string (6 decimals)."""
        pairs = (f"{name}={value:.6f}" for name, value in zip(self.param_names, param_values))
        return ", ".join(pairs)

    def sobol_samples(self, n_samples, dim, scramble=True):
        """Draw n_samples points from a (scrambled) Sobol sequence in [0, 1)^dim.

        No fallback sampler is used: scipy's qmc.Sobol is required.
        """
        engine = qmc.Sobol(d=dim, scramble=scramble, seed=None)
        return engine.random(n_samples)

    def normalize_array(self, X, bounds):
        """Map X (N x d) from its original scale onto the unit cube [0, 1]^d."""
        lower = bounds[:, 0]
        span = bounds[:, 1] - lower
        return (X - lower) / span

    def unnormalize_array(self, X_norm, bounds):
        """Map X_norm from the unit cube [0, 1]^d back to the original scale."""
        lower = bounds[:, 0]
        upper = bounds[:, 1]
        return lower + X_norm * (upper - lower)

    def scaled_to_original(self, X_scaled, precisions):
        """Convert scaled parameters (value / precision) back to original units.

        Multiplies each column by its precision; ``precisions`` is reshaped
        to a row vector when ``X_scaled`` is 2-D so it broadcasts over samples.
        """
        values = np.array(X_scaled)
        factors = np.array(precisions)
        if values.ndim == 2:
            # Explicit row vector: per-column precisions broadcast over all rows.
            factors = factors.reshape(1, -1)
        return values * factors

    def round_for_types_scaled(self, X_scaled, param_types):
        """Round integer-typed columns of a scaled parameter matrix to whole numbers.

        ``X_scaled`` is (N, d), already divided by the per-parameter precisions;
        columns whose type is not integer are returned untouched. The input
        array is never mutated.
        """
        rounded = np.array(X_scaled, dtype=float).copy()
        for col, ptype in enumerate(param_types):
            if ptype in (ParameterType.INT, "INT"):
                # np.rint rounds half to even, same as the original behavior.
                rounded[:, col] = np.rint(rounded[:, col])
        return rounded

    def expected_improvement(self, X, gp, y_best, xi=0.01):
        """Analytic Expected Improvement on a sklearn GP.

        X may be a batch (k, d) or a single point (d,). Points with
        near-zero predictive std get EI = 0 to avoid division by zero.
        """
        X = np.atleast_2d(X)
        mu, sigma = gp.predict(gp.X_scaler_.transform(X), return_std=True)
        mu = mu.reshape(-1)
        sigma = sigma.reshape(-1)

        improvement = mu - y_best - xi
        ei = np.zeros_like(improvement)
        with np.errstate(divide='warn'):
            valid = sigma > 1e-12  # guard against zero predictive variance
            z = improvement[valid] / sigma[valid]
            ei[valid] = improvement[valid] * norm.cdf(z) + sigma[valid] * norm.pdf(z)
        return np.maximum(ei, 0.0)

    def fit_gp_sklearn(self, X_norm, y_raw, n_restarts=10, random_state=0):
        """Fit an sklearn GP (constant * Matern-2.5 + white noise) on scaled inputs.

        The fitted StandardScaler is attached to the model as ``gp.X_scaler_``
        so prediction helpers can reuse exactly the same input transform.
        """
        n_dims = X_norm.shape[1]
        scaler = StandardScaler()
        kernel = (
            ConstantKernel(1.0, (1e-3, 1e3))
            * Matern(length_scale=np.ones(n_dims), length_scale_bounds=(1e-2, 1e2), nu=2.5)
            + WhiteKernel(noise_level=1e-3, noise_level_bounds=(1e-8, 1e1))
        )
        model = GaussianProcessRegressor(
            kernel=kernel,
            normalize_y=True,
            n_restarts_optimizer=n_restarts,
            random_state=random_state,
        )
        model.fit(scaler.fit_transform(X_norm), y_raw)
        model.X_scaler_ = scaler
        return model

    def propose_location_single(self, gp, bounds_norm, y_best, n_candidates=3000, top_k=8, n_local_restarts=6):
        """Single-point EI maximization: Sobol screening + L-BFGS-B polishing + DE fallback.

        Args:
            gp: fitted sklearn GP carrying an ``X_scaler_`` attribute.
            bounds_norm: per-dimension (low, high) bounds — normally the unit cube.
            y_best: incumbent best objective value fed into EI.
            n_candidates: number of Sobol candidates scored in the screening pass.
            top_k: how many top-EI candidates seed the local searches.
            n_local_restarts: jittered L-BFGS-B restarts per seed point.

        Returns:
            (best_x, best_ei): the EI maximizer (clipped into bounds, normalized
            scale) and its EI value.
        """
        dim = len(bounds_norm)
        bounds_array = np.array(bounds_norm)
        X_cand = self.sobol_samples(n_candidates, dim)
        # Sobol points live in [0,1]^d; rescale only when bounds differ from the unit cube.
        if not np.allclose(bounds_array[:, 0], 0.0) or not np.allclose(bounds_array[:, 1], 1.0):
            ranges = bounds_array[:, 1] - bounds_array[:, 0]
            X_cand = X_cand * ranges + bounds_array[:, 0]

        ei_vals = self.expected_improvement(X_cand, gp, y_best)
        top_idx = np.argsort(-ei_vals)[:top_k]
        best_x, best_ei = None, -1.0

        # Local refinement: start L-BFGS-B from each top candidate plus small
        # Gaussian perturbations of it, keeping the best EI found so far.
        for idx in top_idx:
            x0 = X_cand[idx]
            seeds = [x0] + [np.clip(x0 + 0.02 * np.random.randn(dim),
                                    bounds_array[:, 0], bounds_array[:, 1])
                            for _ in range(max(0, n_local_restarts - 1))]
            for seed in seeds:
                try:
                    res = minimize(
                        lambda x: -self.expected_improvement(np.array(x).reshape(1, -1), gp, y_best)[0],
                        x0=seed,
                        bounds=bounds_norm,
                        method='L-BFGS-B',
                        options={'maxiter': 200}
                    )
                    cand = np.clip(res.x, bounds_array[:, 0], bounds_array[:, 1])
                    cand_ei = self.expected_improvement(cand.reshape(1, -1), gp, y_best)[0]
                    if cand_ei > best_ei + 1e-12:
                        best_ei, best_x = float(cand_ei), cand
                except Exception:
                    continue  # a failed local search just skips this seed

        # Global fallback: differential evolution may escape poor local basins.
        try:
            def neg_ei_flat(x_flat):
                return -float(self.expected_improvement(np.array(x_flat).reshape(1, -1), gp, y_best)[0])

            de_res = differential_evolution(neg_ei_flat, bounds=bounds_norm, polish=True, maxiter=200, popsize=10,
                                            tol=1e-6)
            cand = np.clip(de_res.x, bounds_array[:, 0], bounds_array[:, 1])
            cand_ei = self.expected_improvement(cand.reshape(1, -1), gp, y_best)[0]
            if cand_ei > best_ei + 1e-12:
                best_ei, best_x = float(cand_ei), cand
        except Exception:
            pass

        # Last resort: return the best raw Sobol candidate from the screening pass.
        if best_x is None:
            best_x = X_cand[np.argmax(ei_vals)]
            best_ei = float(np.max(ei_vals))

        best_x = np.clip(best_x, bounds_array[:, 0], bounds_array[:, 1])
        return best_x, best_ei

    def fit_botorch_gp(self, X_norm, y_raw):
        """Fit a BoTorch SingleTaskGP on inputs already normalized into [0, 1]^d.

        Targets are standardized before fitting; training falls back to the
        older ``fit_gpytorch_model`` API when ``fit_gpytorch_mll`` is absent
        or fails, and the model is returned in eval mode either way.
        """
        if not BOTOCH_OK:
            raise RuntimeError("BoTorch 未安装，无法拟合 BoTorch GP。")

        train_x = torch.tensor(X_norm, dtype=TORCH_DTYPE, device=DEVICE)
        train_y = torch.tensor(y_raw.reshape(-1, 1), dtype=TORCH_DTYPE, device=DEVICE)

        gp_model = SingleTaskGP(train_x, standardize(train_y))
        gp_model = gp_model.to(device=DEVICE, dtype=TORCH_DTYPE)
        mll = ExactMarginalLogLikelihood(gp_model.likelihood, gp_model)

        gp_model.train()
        try:
            fit_gpytorch_mll(mll)
        except Exception as e:
            # Compatibility path for older BoTorch releases.
            try:
                from botorch.fit import fit_gpytorch_model
                fit_gpytorch_model(mll)
            except Exception:
                self.log(f"警告：BoTorch 的 fit 函数失败，继续使用未经充分训练的模型。{e}")
        gp_model.eval()
        return gp_model

    def propose_location_batch_botorch(self, X_norm, y_raw, bounds_norm, batch_size, use_noisy=False,
                                       n_candidates=1024):
        """Generate a batch of candidates with BoTorch's ``optimize_acqf``.

        Strategy: joint q-point optimization first; if that fails or looks
        suspicious, fall back to sequential greedy single-point optimization;
        as a last resort, score random samples with the sklearn-EI helper.

        Args:
            X_norm: existing inputs, shape (n, d), normalized into [0, 1]^d.
            y_raw: observed objective values (original scale), shape (n,).
            bounds_norm: per-dimension (low, high) bounds in normalized space.
            batch_size: number of points (q) to propose.
            use_noisy: if True, prefer q-NEI over q-EI.
            n_candidates: raw-sample budget for the acquisition optimizer.

        Returns:
            (candidates, acq_value): (batch_size, d) normalized points and the
            acquisition value (0.0 for the sequential/random fallbacks).
        """
        if not BOTOCH_OK:
            raise RuntimeError("BoTorch 不可用，无法进行批量 q-EI/q-NEI。")

        d = X_norm.shape[1]
        model = self.fit_botorch_gp(X_norm, y_raw)
        model.eval()

        sampler = SobolQMCNormalSampler(sample_shape=torch.Size([self.mc_samples]))
        X_tensor = torch.tensor(X_norm, dtype=TORCH_DTYPE, device=DEVICE)
        y_tensor = torch.tensor(y_raw.reshape(-1, 1), dtype=TORCH_DTYPE, device=DEVICE)

        if use_noisy:
            try:
                from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
                acq = qNoisyExpectedImprovement(model=model, X_baseline=X_tensor, sampler=sampler)
                self.log("使用 q-NEI (noisy).")
            except Exception as e:
                self.log(f"无法导入 qNoisyExpectedImprovement: {e}")
                from botorch.acquisition.monte_carlo import qExpectedImprovement
                # best_f must be on the standardized scale the GP was fit on
                best_f = standardize(y_tensor).max().item()
                acq = qExpectedImprovement(model=model, best_f=best_f, sampler=sampler)
                self.log("回退到 q-EI（注意：这不是 q-NEI）")
        else:
            from botorch.acquisition.monte_carlo import qExpectedImprovement
            best_f = standardize(y_tensor).max().item()
            acq = qExpectedImprovement(model=model, best_f=best_f, sampler=sampler)
            self.log(f"使用 q-EI (Expected Improvement)，当前最佳值（原始尺度）: {np.max(y_raw):.6f}")

        # optimize_acqf expects bounds with shape (2, d): row 0 lower, row 1 upper.
        bounds_tensor = torch.tensor(bounds_norm, dtype=TORCH_DTYPE, device=DEVICE).T

        # First choice: jointly optimize all q points at once.
        try:
            cand_joint, val_joint = optimize_acqf(
                acq_function=acq,
                bounds=bounds_tensor,
                q=batch_size,
                num_restarts=min(self.joint_num_restarts, 40),
                raw_samples=max(512, n_candidates),
                options={"maxiter": 200},
                return_best_only=True,
                sequential=False
            )
            candidates = cand_joint.detach().cpu().numpy()
            acq_value = float(val_joint.item())
            # Reject results containing NaNs or (near-)duplicate rows.
            if np.isnan(candidates).any() or np.unique(np.round(candidates, 10), axis=0).shape[0] < candidates.shape[0]:
                raise RuntimeError("joint result suspicious")
            return candidates, acq_value
        except Exception as e:
            # Fall through to the sequential greedy strategy below.
            self.log(f"joint 优化失败或结果可疑，回退 sequential 贪心: {e}")

        seq_candidates = []
        try:
            for k in range(batch_size):
                cand_k, val_k = optimize_acqf(
                    acq_function=acq,
                    bounds=bounds_tensor,
                    q=1,
                    num_restarts=8,
                    raw_samples=max(256, n_candidates // 2),
                    options={"maxiter": 200},
                    return_best_only=True,
                    sequential=True
                )
                xk = cand_k.detach().cpu().numpy().reshape(-1)
                seq_candidates.append(xk)
            seq_arr = np.vstack(seq_candidates)
            self.log("sequential 回退成功")
            return seq_arr, 0.0
        except Exception as e:
            self.log(f"sequential 回退也失败: {e}")

        # Last resort: random samples scored by sklearn-EI, keep the top q.
        self.log("所有方法失败，使用随机后备方案...")
        try:
            rand_np = np.random.rand(5000, d)
            gp_tmp = self.fit_gp_sklearn(X_norm, y_raw, n_restarts=3, random_state=0)
            ei_vals = self.expected_improvement(rand_np, gp_tmp, np.max(y_raw))
            top_idx = np.argsort(-ei_vals)[:batch_size]
            final = rand_np[top_idx]
            final_acq = float(np.max(ei_vals[top_idx]))
            return final, final_acq
        except Exception as e:
            self.log(f"随机后备也失败: {e}")
            final = np.random.rand(batch_size, d)
            return final, 0.0


if __name__ == "__main__":
    # Launch the Tkinter GUI and hand control to its event loop.
    main_window = tk.Tk()
    gui = BayesianOptimizationGUI(main_window)
    main_window.mainloop()
