# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os, sys
import torch
import yaml
from typing import Dict, Any, Optional, List
from rstar_deepthink.llms.rm import *
from transformers import AutoConfig, AutoTokenizer
from vllm import LLM, SamplingParams

def load_models_config(config_path: str) -> Dict[str, Any]:
    """Parse a YAML model-configuration file.

    Args:
        config_path: Path to the YAML model configuration file.

    Returns:
        The parsed configuration as a dictionary.

    Raises:
        FileNotFoundError: If no file exists at ``config_path``.
    """
    if not os.path.exists(config_path):
        raise FileNotFoundError(f"模型配置文件不存在: {config_path}")

    with open(config_path, 'r', encoding='utf-8') as f:
        return yaml.safe_load(f)

def get_model_config(model_name: str, models_config: Dict[str, Any]) -> Dict[str, Any]:
    """
    获取指定模型的配置信息
    
    Args:
        model_name: 模型名称
        models_config: 模型配置字典
        
    Returns:
        指定模型的配置信息
    """
    if 'models' not in models_config:
        raise ValueError("模型配置文件格式错误，缺少 'models' 字段")
    
    if model_name not in models_config['models']:
        raise ValueError(f"模型 '{model_name}' 不存在于配置文件中")
    
    return models_config['models'][model_name]

class OpenAISamplingParams:
    """Sampling parameters for the OpenAI backend.

    Mirrors the subset of vLLM's ``SamplingParams`` attributes used by this
    project (notably ``n_generate_sample``) so both backends can be driven
    through the same interface.
    """

    def __init__(self, temperature: float, max_tokens: int, model: str, n: int, max_n_per_request: int = 4, stop: Optional[List[str]] = None):
        """Initialize OpenAI sampling parameters.

        Args:
            temperature: Sampling temperature.
            max_tokens: Maximum number of tokens to generate.
            model: Model name to request from the API.
            n: Total number of samples to generate.
            max_n_per_request: Maximum samples per API request (requests are
                expected to be split by the caller when ``n`` exceeds this).
            stop: Optional list of stop strings.
        """
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.model = model
        self.n = n
        self.max_n_per_request = max_n_per_request
        self.stop = stop
        # Alias kept for compatibility with the vLLM code path.
        self.n_generate_sample = n

    def __repr__(self) -> str:
        # Added for debuggability; does not affect any existing caller.
        return (
            f"{type(self).__name__}(temperature={self.temperature!r}, "
            f"max_tokens={self.max_tokens!r}, model={self.model!r}, "
            f"n={self.n!r}, max_n_per_request={self.max_n_per_request!r}, "
            f"stop={self.stop!r})"
        )

def _normalize_stop(stop):
    """Coerce a configured stop-token value to a plain list (or ``None``).

    Accepts a plain list/tuple, an OmegaConf ``ListConfig`` (or any other
    iterable), or a scalar, which becomes a one-element string list.
    """
    if stop is None:
        return None
    if isinstance(stop, (list, tuple)):
        return list(stop)
    # OmegaConf ListConfig (or another iterable) -> standard list.
    from omegaconf import OmegaConf
    return OmegaConf.to_object(stop) if hasattr(stop, '__iter__') else [str(stop)]


def _init_openai_backend(config):
    """Build an OpenAI API client plus matching sampling parameters."""
    try:
        import openai
    except ImportError:
        raise ImportError("使用OpenAI API需要安装openai库: pip install openai")

    # Resolve the per-model settings (API key, base URL, ...) from the
    # models config file; config.model_dir doubles as the model name here.
    models_config = load_models_config(config.models_config_path)
    model_config = get_model_config(config.model_dir, models_config)

    client = openai.OpenAI(
        api_key=model_config['api_key'],
        base_url=model_config['base_url'],
    )

    sampling_params = OpenAISamplingParams(
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        model=model_config['model_name'],
        n=config.n_generate_sample,
        max_n_per_request=model_config.get('max_n_per_request', 4),
        stop=_normalize_stop(config.stop),
    )
    return client, sampling_params


def _init_vllm_backend(config):
    """Build a local vLLM engine plus ``SamplingParams``."""
    import shutil

    # Compatibility shim: some checkpoints ship preprocessor.json but not
    # video_preprocessor.json, which triggers a loader warning. Copying the
    # file silences it; failure here is harmless (best-effort).
    preprocessor_path = os.path.join(config.model_dir, "preprocessor.json")
    video_preprocessor_path = os.path.join(config.model_dir, "video_preprocessor.json")
    if os.path.exists(preprocessor_path) and not os.path.exists(video_preprocessor_path):
        try:
            shutil.copy2(preprocessor_path, video_preprocessor_path)
            print(f"已自动创建 video_preprocessor.json 以解决兼容性警告")
        except Exception as e:
            print(f"无法创建 video_preprocessor.json: {e}，但这不会影响功能")

    max_num_seqs = getattr(config, 'vllm_max_num_seqs', config.vllm_max_requests)
    vllm_kwargs = {
        "model": config.model_dir,
        "tensor_parallel_size": config.tp,
        "trust_remote_code": True,
        # NOTE(review): a falsy seed (None or 0) maps to 0 here.
        "seed": config.seed if config.seed else 0,
        "swap_space": config.swap_space,
        "max_model_len": config.max_model_len,
        "gpu_memory_utilization": config.llm_gpu_memory_utilization,
        "enforce_eager": True,
        "distributed_executor_backend": 'ray' if config.tp > 1 else None,
        "dtype": "bfloat16",
        "max_num_seqs": max_num_seqs,
        "disable_log_stats": config.vllm_disable_log_stats,
        "enable_chunked_prefill": config.vllm_enable_chunked_prefill,
    }

    # Optional tuning knobs, forwarded only when present in the config.
    if getattr(config, 'vllm_enable_prefix_caching', False):
        vllm_kwargs["enable_prefix_caching"] = True
    if hasattr(config, 'vllm_max_num_batched_tokens'):
        vllm_kwargs["max_num_batched_tokens"] = config.vllm_max_num_batched_tokens
    if hasattr(config, 'vllm_max_seq_len_to_capture'):
        vllm_kwargs["max_seq_len_to_capture"] = config.vllm_max_seq_len_to_capture

    # Debug/monitoring output for multi-GPU tensor-parallel runs.
    if config.tp > 1:
        print(f"🚀 启动多卡推理：{config.tp}卡张量并行")
        print(f"📊 模型路径：{config.model_dir}")
        print(f"💾 GPU内存利用率：{config.llm_gpu_memory_utilization}")
        print(f"🔄 最大并发序列：{max_num_seqs}")
        if getattr(config, 'vllm_enable_prefix_caching', False):
            print("⚡ 前缀缓存：已启用（MCTS性能关键优化）")

    llm = LLM(**vllm_kwargs)

    sampling_params = SamplingParams(
        temperature=config.temperature,
        top_k=config.top_k,
        top_p=config.top_p,
        best_of=config.best_of,
        max_tokens=config.max_tokens,
        n=config.n_generate_sample,
        stop=_normalize_stop(config.stop),
        skip_special_tokens=False,
        # Only pin the sampling seed for greedy decoding; sampled runs stay
        # unseeded so the n generations differ.
        seed=config.seed if config.temperature == 0 else None,
    )
    return llm, sampling_params


def llm_init(config):
    """Unified model-initialization entry point.

    Dispatches on ``config.backend_type``: ``"openai"`` builds an OpenAI
    API client, anything else builds a local vLLM engine.

    Returns:
        ``(engine, sampling_params)`` — an ``openai.OpenAI`` client with
        ``OpenAISamplingParams`` for the OpenAI backend, or a vLLM ``LLM``
        with vLLM ``SamplingParams`` otherwise.
    """
    # Force the vLLM V0 engine for compatibility (V1 rejects some of the
    # parameters used below, e.g. best_of).
    if config.backend_type == "vllm" and "VLLM_USE_V1" not in os.environ:
        os.environ["VLLM_USE_V1"] = "0"
        print("为了兼容性，设置 VLLM_USE_V1=0 以使用 vLLM V0 引擎")

    if config.backend_type == "openai":
        return _init_openai_backend(config)
    return _init_vllm_backend(config)

def llm_engine(config):
    """Thin wrapper around ``llm_init``; returns ``(llm, sampling_params)``."""
    return llm_init(config)

def rm_engine(config):
    """Build the reward-model stack: PRM engine, value head, and tokenizer.

    Returns:
        ``(prm_model, v_head, tokenizer)`` when ``config.need_value_func``
        is truthy, otherwise ``(None, None, None)``.
    """
    if not config.need_value_func:
        return None, None, None

    prm_model = LLM(
        model=config.reward_model_dir,
        task="reward",
        tensor_parallel_size=1,
        trust_remote_code=True,
        max_model_len=config.max_model_len,
        enforce_eager=True,
        swap_space=0,
        # Give the RM whatever GPU memory the policy LLM leaves free
        # (for qwen 7b the RM needs ~15G).
        gpu_memory_utilization=0.98 - config.llm_gpu_memory_utilization,
    )

    # Strip the "v_head." prefix so the keys match ValueHead's state dict.
    raw_state = torch.load(
        os.path.join(config.reward_model_dir, "value_head.bin"), weights_only=True
    )
    v_state = {name.replace("v_head.", ""): param for name, param in raw_state.items()}

    model_config = AutoConfig.from_pretrained(
        config.reward_model_dir, trust_remote_code=True, use_cache=False
    )
    v_head = ValueHead(model_config)
    v_head.load_state_dict(v_state)
    v_head.eval()

    tokenizer = AutoTokenizer.from_pretrained(
        config.reward_model_dir,
        trust_remote_code=True,
        use_cache=False,
        split_special_tokens=False,
    )
    return prm_model, v_head, tokenizer
