# -*- coding: utf-8 -*-
import logging
from typing import Any, Callable, Dict, Optional, Tuple

import optuna

# Configure logging for this module.
# NOTE(review): logging.basicConfig at import time configures the process-wide
# root logger; confirm this module is meant to run as a standalone framework
# rather than be embedded in an application that configures logging itself.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - [%(levelname)s] - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)


class JvmAutoTuner:
    """
    A generic, callback-based JVM parameter auto-tuning framework (enhanced).

    V2 features:
    - Explicit optimization goals: 'latency', 'throughput', or 'balanced'.
    - Detailed per-trial logging: suggested parameters, metrics, current best.
    - Time-based termination condition (timeout).
    - The evaluation callback may return a multi-metric dictionary.
    - Optional early-trial pruning via an Optuna pruner.
    """

    def __init__(
        self,
        param_space: Dict[str, Dict[str, Any]],
        launch_callback: Callable[[str], Any],
        evaluate_callback: Callable[[Any, optuna.trial.Trial], Dict[str, float]],
        goal: str = "balanced",
        direction: str = "minimize",
        n_trials: int = 50,
        timeout: Optional[int] = None,
        pruner: Optional[optuna.pruners.BasePruner] = None
    ):
        """
        Initialize the tuner.

        Args:
            param_space (Dict): Definition of the parameter search space.
            launch_callback (Callable): Launches the application with a JVM
                options string; must return a non-None instance handle.
            evaluate_callback (Callable): Evaluates the launched instance and
                returns a dict of metrics (e.g. 'latency', 'throughput').
            goal (str): Optimization goal: 'latency', 'throughput', 'balanced'.
            direction (str): Optimization direction; only "minimize" is
                currently supported (scores are constructed to be minimized).
            n_trials (int): Total number of trials.
            timeout (int, optional): Overall run timeout in seconds.
            pruner (optuna.pruners.BasePruner, optional): Optuna pruner.

        Raises:
            ValueError: If ``direction`` is not "minimize" or ``goal`` is not
                one of the supported values.
        """
        if direction != "minimize":
            raise ValueError("Currently only 'minimize' direction is supported.")
        if goal not in ('latency', 'throughput', 'balanced'):
            raise ValueError(f"Goal '{goal}' is not supported. Use 'latency', 'throughput', or 'balanced'.")

        self.param_space = param_space
        self.launch_cb = launch_callback
        self.eval_cb = evaluate_callback
        self.goal = goal
        self.direction = direction
        self.n_trials = n_trials
        self.timeout = timeout
        self.pruner = pruner

        logger.info("JvmAutoTuner (V2) initialized.")
        logger.info(f"Optimization goal: {self.goal}")
        logger.info(f"Number of trials: {self.n_trials}")
        if self.timeout:
            logger.info(f"Timeout: {self.timeout} seconds")
        if self.pruner:
            logger.info(f"Using pruner: {self.pruner.__class__.__name__}")

    def _calculate_score(self, metrics: Dict[str, float]) -> float:
        """Compute the final score (lower is better) from goal and metrics.

        Raises:
            ValueError: If a metric required by the current goal is missing.
        """
        latency = metrics.get('latency')
        throughput = metrics.get('throughput')

        if self.goal == 'latency':
            if latency is None:
                raise ValueError("Metric 'latency' is required for 'latency' goal.")
            return latency

        if self.goal == 'throughput':
            if throughput is None:
                raise ValueError("Metric 'throughput' is required for 'throughput' goal.")
            # We minimize the score, so higher throughput maps to a lower score.
            return -throughput

        # goal == 'balanced' — the only remaining case (validated in __init__).
        if latency is None or throughput is None:
            raise ValueError("Metrics 'latency' and 'throughput' are required for 'balanced' goal.")
        # Simple normalized weighted sum; adjust weights/ranges as needed.
        # Assumes latency in roughly 50-500 ms and throughput in roughly
        # 1000-10000 QPS -- TODO confirm against the actual workload.
        normalized_latency = latency / 500
        normalized_throughput_score = (10000 - throughput) / 10000
        return 0.6 * normalized_latency + 0.4 * normalized_throughput_score

    def _suggest_jvm_opts(self, trial: optuna.trial.Trial) -> str:
        """Build the JVM options string for the current trial.

        Returns:
            A space-separated command-line string, e.g.
            ``-Xmx4096m -XX:+UseG1GC``.
        """
        suggested_params: Dict[str, Any] = {}

        # 1. Suggest a value for every parameter defined in param_space.
        for name, spec in self.param_space.items():
            ptype = spec["type"]
            if ptype == "int":
                val = trial.suggest_int(name, spec["low"], spec["high"], step=spec.get("step", 1))
            elif ptype == "categorical":
                val = trial.suggest_categorical(name, spec["choices"])
            elif ptype == "float":
                val = trial.suggest_float(name, spec["low"], spec["high"])
            elif ptype == "loguniform":
                # suggest_loguniform is deprecated; suggest_float(log=True)
                # samples from the same log-uniform distribution.
                val = trial.suggest_float(name, spec["low"], spec["high"], log=True)
            else:
                logger.warning(f"Unknown parameter type '{ptype}' for '{name}'. Skipping.")
                continue
            suggested_params[name] = val

        # 2. Apply common best-practice constraints, e.g. Xms = Xmx.
        if "Xmx" in suggested_params and "Xms" in self.param_space:
            logger.debug(f"Applying constraint: Xms = Xmx = {suggested_params['Xmx']}")
            suggested_params["Xms"] = suggested_params["Xmx"]

        # 3. Render the final parameter values into a command-line string.
        opts = []
        for name, val in suggested_params.items():
            spec = self.param_space[name]
            unit = spec.get("unit", "")

            # Skip parameters that only exist as constraint targets
            # (e.g. when Xms=Xmx we only emit Xmx).
            if spec.get("is_constraint_target", False):
                continue

            # BUGFIX: evaluate the dependency condition before any rendering,
            # so conditional GC/flag parameters are gated as well (previously
            # the GC/is_flag branches bypassed this check).
            if "condition" in spec:
                dependent_param, required_value = spec["condition"]
                if suggested_params.get(dependent_param) != required_value:
                    continue

            if name == "GC":
                opts.append(f"-XX:+Use{val}")
            elif spec.get("is_flag", False):
                opts.append(f"-XX:{'+' if val else '-'}{name}")
            elif name.startswith("X"):
                opts.append(f"-{name}{val}{unit}")
            else:
                opts.append(f"-XX:{name}={val}{unit}")
        return " ".join(opts)

    def _objective(self, trial: optuna.trial.Trial) -> float:
        """Optuna objective wrapping the full suggest -> launch -> evaluate flow."""
        jvm_opts = self._suggest_jvm_opts(trial)
        logger.info(f"--- Trial #{trial.number} ---")
        logger.info(f"Suggesting JVM options: {jvm_opts}")

        try:
            logger.info(f"[Trial #{trial.number}] Launching application...")
            instance = self.launch_cb(jvm_opts)
            if instance is None:
                raise ValueError("Launch callback must return a valid instance handle, not None.")
            logger.info(f"[Trial #{trial.number}] Application launched: {instance}")

            logger.info(f"[Trial #{trial.number}] Evaluating performance...")
            metrics = self.eval_cb(instance, trial)
            logger.info(f"[Trial #{trial.number}] Evaluation metrics: {metrics}")

            score = self._calculate_score(metrics)
            logger.info(f"[Trial #{trial.number}] Calculated score = {score:.4f} (Goal: {self.goal})")
            return score

        except optuna.exceptions.TrialPruned as e:
            # Pruned trials must propagate so Optuna records the PRUNED state.
            logger.info(f"[Trial #{trial.number}] Pruned. Reason: {e}")
            raise
        except Exception as e:
            logger.error(f"[Trial #{trial.number}] An error occurred: {e}", exc_info=True)
            return float("inf")  # Worst possible score so this trial is never "best".

    def _log_best_trial(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
        """Optuna callback: log the best result so far after every trial."""
        logger.info(f"--- Trial #{trial.number} Finished ---")
        # Only report the best result when this trial completed successfully;
        # study.best_value raises if no trial has ever completed.
        if trial.state == optuna.trial.TrialState.COMPLETE:
            logger.info(f"Current best score: {study.best_value:.4f}")
            logger.info("Current best params:")
            for key, value in study.best_params.items():
                logger.info(f"  - {key}: {value}")
        else:
            logger.warning(f"Trial failed with state: {trial.state}")
        logger.info("-" * 40)

    def run(self) -> Tuple[Optional[Dict[str, Any]], float]:
        """
        Run the auto-tuning process.

        Returns:
            Tuple of (best_params, best_score). Returns (None, inf) when no
            trial completed successfully.
        """
        logger.info("Starting JVM auto-tuning process...")
        study = optuna.create_study(direction=self.direction, pruner=self.pruner)

        try:
            study.optimize(
                self._objective,
                n_trials=self.n_trials,
                timeout=self.timeout,
                callbacks=[self._log_best_trial]
            )
        except KeyboardInterrupt:
            logger.warning("Optimization stopped by user.")

        logger.info("=" * 50)
        logger.info("Optimization finished.")
        logger.info(f"Number of finished trials: {len(study.trials)}")

        # BUGFIX: study.best_trial RAISES ValueError when no trial has
        # completed, so truthiness-checking it never reached the fallback
        # branch — guard on completed trials explicitly instead.
        completed = [t for t in study.trials
                     if t.state == optuna.trial.TrialState.COMPLETE]
        if not completed:
            logger.warning("No successful trials completed. Could not determine best parameters.")
            return None, float('inf')

        best = study.best_trial
        logger.info("Best trial found:")
        logger.info(f"  Value (Score): {best.value:.4f}")
        logger.info("  Params:")
        for key, value in best.params.items():
            logger.info(f"    {key}: {value}")
        return best.params, best.value

