import os
import pickle
import time
import logging
from typing import Dict, Any

import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import hpbandster.core.nameserver as hpns
from hpbandster.core.worker import Worker
from hpbandster.optimizers import BOHB

from alphasql.config.mcts_config import MCTSConfig
from alphasql.runner.mcts_runner import MCTSRunner
from alphasql.runner.task import Task

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class MCTSWorker(Worker):
    """BOHB worker that scores one hyperparameter configuration by running
    the MCTS pipeline on a single benchmark task."""

    def __init__(self, tasks, base_config_path, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Evaluation task pool and the path of the YAML config template.
        self.tasks = tasks
        self.base_config_path = base_config_path

    def compute(self, config: Dict, budget: float, **kwargs) -> Dict:
        """
        Objective function invoked by BOHB.

        :param config: One sampled hyperparameter configuration.
        :param budget: Resource budget; mapped to the number of MCTS rollouts.
        :return: Dict with 'loss' and 'info' keys, as hpbandster requires.
        """
        try:
            # Load the base configuration, then overlay the sampled values.
            mcts_config = MCTSConfig.from_yaml(self.base_config_path)

            # The BOHB budget directly controls the rollout count.
            mcts_config.max_rollout_steps = int(budget)
            mcts_config.exploration_constant = config.get('exploration_constant', mcts_config.exploration_constant)
            mcts_config.max_depth = config.get('max_depth', mcts_config.max_depth)
            model_kwargs = mcts_config.mcts_model_kwargs
            if model_kwargs:
                # Sampling parameters forwarded to the underlying model.
                for key in ('temperature', 'n', 'top_p'):
                    model_kwargs[key] = config.get(key, model_kwargs.get(key))

            # Run the MCTS pipeline. Only the first task is evaluated, to keep
            # feedback fast; a fuller evaluation would need MCTSRunner to be
            # refactored for flexible single-task, multi-parameter calls.
            runner = MCTSRunner(mcts_config)
            start_time = time.time()
            final_node = runner.run_one_task(self.tasks[0])
            end_time = time.time()

            # Turn the (to-be-maximized) reward into a loss BOHB can minimize.
            # A missing or never-visited node gets the worst possible loss.
            node_is_valid = (final_node
                             and hasattr(final_node, 'Q')
                             and hasattr(final_node, 'N')
                             and final_node.N != 0)
            if node_is_valid:
                reward = final_node.Q / final_node.N
                # Loss is simply -reward: we want high reward and low time,
                # but BOHB already accounts for time through the budget, so
                # time is not folded into the loss here.
                loss = -reward
            else:
                reward = -1
                loss = float('inf')

            logger.info(f"Config: {config}, Budget: {budget}, Reward: {reward:.4f}, Time: {end_time - start_time:.2f}s, Loss: {loss:.4f}")

            return {
                'loss': loss,
                'info': {
                    'reward': reward,
                    'time_taken': end_time - start_time,
                    'rollouts': int(budget),
                },
            }
        except Exception as e:
            logger.error(f"Worker compute failed with config {config} and budget {budget}: {e}", exc_info=True)
            # Any exception yields the worst loss so BOHB steers away from it.
            return {
                'loss': float('inf'),
                'info': {'error': str(e)},
            }


def get_configspace():
    """Build the hyperparameter search space explored by BOHB."""
    cs = CS.ConfigurationSpace()

    hyperparameters = [
        CSH.UniformFloatHyperparameter('exploration_constant', lower=0.5, upper=3.0, default_value=1.4),
        CSH.UniformIntegerHyperparameter('max_depth', lower=8, upper=32, default_value=16),
        CSH.UniformFloatHyperparameter('temperature', lower=0.1, upper=1.0, default_value=0.8),
        CSH.UniformIntegerHyperparameter('n', lower=1, upper=5, default_value=3),
        CSH.UniformFloatHyperparameter('top_p', lower=0.7, upper=1.0, default_value=0.9),
    ]
    for hp in hyperparameters:
        cs.add_hyperparameter(hp)

    return cs

def main():
    """Run BOHB hyperparameter optimization over the MCTS configuration.

    Starts a local NameServer, spawns background workers, runs the BOHB
    optimizer, shuts everything down, then logs the best configuration.
    """
    # --- Settings ---
    base_config_path = 'config/qwen32b_bird_dev.yaml'
    run_id = 'alphasql_bohb_opt'
    n_iterations = 10   # total number of BOHB iterations
    min_budget = 10     # minimum rollout count
    max_budget = 40     # maximum rollout count
    n_workers = 1       # number of parallel workers

    # Load the evaluation tasks referenced by the base config.
    base_config = MCTSConfig.from_yaml(base_config_path)
    with open(base_config.tasks_file_path, 'rb') as f:
        all_tasks = pickle.load(f)

    # For a quick demonstration, only the first 5 tasks form the eval set.
    tasks_subset = all_tasks[:5]
    logger.info(f"Using {len(tasks_subset)} tasks for optimization.")

    # --- Launch BOHB ---
    # 1. Start the NameServer; BOHB needs one to coordinate the workers.
    # On Windows, running it in-process may hit networking issues — if so,
    # start it manually from the command line with `hpbandster-ns`.
    try:
        ns = hpns.NameServer(run_id=run_id, host='127.0.0.1', port=None)
        ns_host, ns_port = ns.start()
        logger.info(f"NameServer started at {ns_host}:{ns_port}")

        # 2. Start the workers in the background.
        workers = []
        for i in range(n_workers):
            w = MCTSWorker(tasks=tasks_subset, base_config_path=base_config_path,
                           nameserver=ns_host, nameserver_port=ns_port, run_id=run_id)
            w.run(background=True)
            workers.append(w)

        logger.info(f"Started {len(workers)} workers.")

        # 3. Run the BOHB optimizer.
        bohb = BOHB(configspace=get_configspace(),
                    run_id=run_id,
                    nameserver=ns_host,
                    nameserver_port=ns_port,
                    min_budget=min_budget,
                    max_budget=max_budget)

        res = bohb.run(n_iterations=n_iterations)
        logger.info("BOHB run finished.")

    finally:
        # 4. Shut everything down, even if the run failed part-way through.
        if 'bohb' in locals():
            bohb.shutdown(shutdown_workers=True)
            logger.info("BOHB workers shut down.")
        if 'ns' in locals():
            ns.shutdown()
            logger.info("NameServer shut down.")

    # --- Result analysis ---
    id2config = res.get_id2config_mapping()
    incumbent = res.get_incumbent_id()

    best_config = id2config[incumbent]['config']
    # hpbandster Run objects expose their fields as attributes, not by
    # subscription: run['loss'] raises TypeError, so use `.loss`.
    best_loss = res.get_runs_by_id(incumbent)[-1].loss

    logger.info('--- Best configuration found ---')
    logger.info(f"Loss: {best_loss:.4f}")
    logger.info("Config:")
    for key, value in best_config.items():
        logger.info(f"  {key}: {value}")

if __name__ == '__main__':
    # Set environment variables pointing to your API key and base URL
    # before running, e.g.:
    # os.environ['OPENAI_API_KEY'] = 'your_api_key'
    # os.environ['OPENAI_BASE_URL'] = 'your_base_url'
    main()
