import os
import pickle
import time
import logging
from typing import Dict, Any
import optuna
import yaml

from alphasql.config.mcts_config import MCTSConfig
from alphasql.runner.mcts_runner import MCTSRunner
from alphasql.runner.task import Task

# Configure module-level logging.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- Globals used to pass extra state into the objective function ---
# Optuna's objective receives only a Trial object as its argument, so the
# task list and the base-config path are shared with it through these
# module-level globals (populated by main() before study.optimize runs).
TASKS_SUBSET = []      # subset of tasks the objective evaluates each trial on
BASE_CONFIG_PATH = ''  # path to the base YAML config file; set in main()

def objective(trial: optuna.Trial) -> float:
    """
    Objective function called by Optuna once per trial.

    Suggests a set of MCTS hyperparameters, runs the MCTS runner over the
    tasks in the module-level ``TASKS_SUBSET``, and returns the average
    per-task reward (final node's Q/N). The study is created with
    ``direction='maximize'``, so higher is better.

    After each task, the running average is reported to Optuna so the
    pruner (Hyperband in ``main``) can stop unpromising trials early.

    :param trial: Optuna Trial object used to suggest hyperparameters.
    :return: Average reward over the task subset, or -1.0 when the trial
             fails with an unexpected error.
    :raises optuna.TrialPruned: when the pruner stops this trial early.
    """
    try:
        # 1. Define and suggest the hyperparameters for this trial.
        config_override = {
            'exploration_constant': trial.suggest_float('exploration_constant', 0.5, 3.0),
            'max_depth': trial.suggest_int('max_depth', 8, 32),
            'temperature': trial.suggest_float('temperature', 0.1, 1.0),
            'n': trial.suggest_int('n', 1, 5),
            'top_p': trial.suggest_float('top_p', 0.7, 1.0),
            'max_rollout_steps': trial.suggest_int('max_rollout_steps', 10, 50)  # rollout budget is tuned too
        }

        # 2. Load the base config and apply the suggested overrides.
        with open(BASE_CONFIG_PATH, 'r') as f:
            config_data = yaml.safe_load(f)
        mcts_config = MCTSConfig.model_validate(config_data)
        mcts_config.max_rollout_steps = config_override['max_rollout_steps']
        mcts_config.exploration_constant = config_override['exploration_constant']
        mcts_config.max_depth = config_override['max_depth']
        # Sampling parameters live inside the model-kwargs dict, which may
        # be absent/empty in the base config.
        if mcts_config.mcts_model_kwargs:
            mcts_config.mcts_model_kwargs['temperature'] = config_override['temperature']
            mcts_config.mcts_model_kwargs['n'] = config_override['n']
            mcts_config.mcts_model_kwargs['top_p'] = config_override['top_p']

        # 3. Run MCTS on the task subset and accumulate the rewards.
        total_reward = 0.0
        total_time = 0.0

        runner = MCTSRunner(mcts_config)

        for step, task in enumerate(TASKS_SUBSET):
            start_time = time.time()
            final_node = runner.run_one_task(task)
            total_time += time.time() - start_time

            if final_node and hasattr(final_node, 'Q') and hasattr(final_node, 'N') and final_node.N > 0:
                total_reward += final_node.Q / final_node.N
            else:
                total_reward += -1  # penalize invalid/empty search paths

            # Report the running average so the pruner can act; without
            # these calls the HyperbandPruner configured in main() is inert.
            trial.report(total_reward / (step + 1), step)
            if trial.should_prune():
                raise optuna.TrialPruned()

        avg_reward = total_reward / len(TASKS_SUBSET)

        logger.info(f"Trial {trial.number}: Reward={avg_reward:.4f}, Time={total_time:.2f}s, Params={trial.params}")

        # The study maximizes, so return the reward directly.
        return avg_reward

    except optuna.TrialPruned:
        # Pruning is normal Optuna control flow — never swallow it.
        raise
    except Exception as e:
        logger.error(f"Trial {trial.number} failed: {e}", exc_info=True)
        # Failed trials get a very poor value so the sampler steers away.
        return -1.0


def main():
    """Run the Optuna hyperparameter search for the MCTS runner."""
    global TASKS_SUBSET, BASE_CONFIG_PATH

    # --- Configuration ---
    BASE_CONFIG_PATH = 'config/qwen32b_bird_dev.yaml'
    n_trials = 50          # total number of hyperparameter combinations to try
    n_startup_trials = 10  # random-search trials before pruning/TPE kicks in

    # Load the base config and the task list it points at.
    with open(BASE_CONFIG_PATH, 'r') as f:
        config_data = yaml.safe_load(f)
    base_config = MCTSConfig.model_validate(config_data)
    # NOTE(review): the tasks file is unpickled as-is — only point this at
    # trusted, locally produced files (pickle can execute arbitrary code).
    with open(base_config.tasks_file_path, 'rb') as f:
        all_tasks = pickle.load(f)

    # For a quick demonstration, evaluate on the first 3 tasks only.
    TASKS_SUBSET = all_tasks[:3]
    logger.info(f"Using {len(TASKS_SUBSET)} tasks for optimization.")

    # --- Run the Optuna optimization ---
    # TPE sampler (Bayesian-optimization style) + Hyperband pruner (BOHB style).
    sampler = optuna.samplers.TPESampler(n_startup_trials=n_startup_trials)
    # Hyperband's "resource" is the per-trial budget — here the number of
    # tasks a trial evaluates (the step passed to trial.report) — not the
    # total number of trials.
    pruner = optuna.pruners.HyperbandPruner(
        min_resource=1, max_resource=max(len(TASKS_SUBSET), 1), reduction_factor=3)

    # The Study object manages the whole optimization process; we want to
    # maximize the reward, hence direction='maximize'.
    study = optuna.create_study(direction='maximize', sampler=sampler, pruner=pruner)

    try:
        study.optimize(objective, n_trials=n_trials, timeout=3600)  # n_trials runs, or at most 1 hour
    except KeyboardInterrupt:
        logger.info("Optimization stopped by user.")

    # --- Result analysis ---
    pruned_trials = study.get_trials(deepcopy=False, states=[optuna.trial.TrialState.PRUNED])
    complete_trials = study.get_trials(deepcopy=False, states=[optuna.trial.TrialState.COMPLETE])

    logger.info("--- Optimization Finished ---")
    logger.info(f"Study statistics: ")
    logger.info(f"  Number of finished trials: {len(study.trials)}")
    logger.info(f"  Number of pruned trials: {len(pruned_trials)}")
    logger.info(f"  Number of complete trials: {len(complete_trials)}")

    # study.best_trial raises ValueError when no trial has completed
    # (e.g. interrupted immediately or every trial failed) — guard it.
    if not complete_trials:
        logger.info("No completed trials; nothing to report.")
        return

    logger.info("--- Best trial ---")
    trial = study.best_trial
    logger.info(f"  Value (Average Reward): {trial.value:.4f}")
    logger.info("  Params: ")
    for key, value in trial.params.items():
        logger.info(f"    {key}: {value}")

if __name__ == '__main__':
    # Make sure the required environment variables are set before running, e.g.:
    # os.environ['OPENAI_API_KEY'] = 'your_api_key'
    # os.environ['OPENAI_BASE_URL'] = 'your_base_url'
    main()
