# Licensed under the MIT license.

from vllm import LLM, SamplingParams
from transformers import AutoTokenizer
import numpy as np
import math


def load_vLLM_model(model_ckpt, seed, tensor_parallel_size=1, half_precision=False, max_num_seqs=256, qwen3_disable_thinking=False):
    """
    Load a vLLM model and its tokenizer, with pre-load GPU cleanup and
    memory-oriented engine configuration.

    Args:
        model_ckpt: Model checkpoint path (or hub id). Substring "Qwen3"
            triggers Qwen3-specific compatibility settings.
        seed: Random seed passed to the vLLM engine.
        tensor_parallel_size: Tensor-parallel degree.
        half_precision: If True, load weights with dtype="half".
        max_num_seqs: Maximum number of concurrently scheduled sequences.
        qwen3_disable_thinking: If True (and the checkpoint is Qwen3),
            set ``tokenizer.enable_thinking = False``.

    Returns:
        tuple: (tokenizer, llm) — the HF tokenizer and the vLLM engine.

    Raises:
        Exception: re-raises whatever ``LLM(...)`` construction raised,
            after a best-effort GPU/heap cleanup.
    """
    import gc
    import os
    import time

    # Pre-emptively free GPU memory so the new engine sees as much free
    # VRAM as possible.
    try:
        import torch
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
            print("已清理CUDA缓存")
    except ImportError:
        pass

    # Force collection of lingering Python-side references.
    gc.collect()

    # Give the driver a moment to actually release the memory.
    time.sleep(1)

    tokenizer = AutoTokenizer.from_pretrained(model_ckpt)

    # Qwen3-specific: optionally disable "thinking" mode.
    # NOTE(review): this sets an ad-hoc attribute on the tokenizer object;
    # it only takes effect if downstream chat-template code reads it — confirm.
    if qwen3_disable_thinking and "Qwen3" in model_ckpt:
        tokenizer.enable_thinking = False

    # Qwen3 compatibility settings.
    extra_kwargs = {}
    if "Qwen3" in model_ckpt:
        print("检测到Qwen3模型，正在应用兼容性设置...")
        # Disable custom all-reduce to avoid incompatibilities.
        extra_kwargs["disable_custom_all_reduce"] = True
        # Compilation level 0 to avoid torch.compile memory problems.
        extra_kwargs["compilation_config"] = {"level": 0}
        print("已设置Qwen3兼容性环境变量")

    # Memory-optimization configuration (applies to every model).
    # NOTE: "disable_custom_all_reduce" here re-sets the value already set
    # for Qwen3 above — same value, harmless.
    extra_kwargs.update({
        "enforce_eager": False,  # keep the default (CUDA graphs allowed)
        "enable_prefix_caching": True,  # enable prefix caching
        "gpu_memory_utilization": 0.85,  # cap GPU memory utilization
        "max_model_len": None,  # use the model's default context length
        "disable_custom_all_reduce": True,  # avoid custom all-reduce memory issues
        "disable_log_stats": True,  # reduce logging overhead
        "enable_chunked_prefill": True,  # enable chunked prefill
        "max_num_batched_tokens": 8192,  # cap batched token count
    })

    # V1-engine-specific memory-management overrides, opted into via an
    # environment variable. (Fixed: the original guarded this with
    # ``hasattr(os.environ, 'get')``, which is always True.)
    if os.environ.get('VLLM_DISABLE_STRICT_MEMORY_CHECK'):
        extra_kwargs.update({
            "compilation_config": {
                "level": 0,  # disable compilation optimizations
                "use_inductor": False,  # disable inductor
                "use_cudagraph": False,  # disable CUDA graphs (memory-state conflicts)
            }
        })
        print("已应用V1引擎内存检查绕过配置")

    print(f"正在加载模型: {model_ckpt}")
    print(f"张量并行大小: {tensor_parallel_size}")
    print(f"半精度: {half_precision}")
    print(f"最大序列数: {max_num_seqs}")

    # Build the constructor arguments once; the original duplicated the
    # whole LLM(...) call just to add dtype="half".
    llm_kwargs = dict(
        model=model_ckpt,
        tensor_parallel_size=tensor_parallel_size,
        seed=seed,
        trust_remote_code=True,
        max_num_seqs=max_num_seqs,
        swap_space=16,
        **extra_kwargs,
    )
    if half_precision:
        llm_kwargs["dtype"] = "half"

    try:
        llm = LLM(**llm_kwargs)
        print("模型加载成功")
    except Exception as e:
        print(f"模型加载失败: {e}")
        # Best-effort cleanup before propagating the failure.
        gc.collect()
        try:
            import torch
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
        except ImportError:
            pass
        raise

    return tokenizer, llm


def generate_with_vLLM_model(
    model,
    input,
    temperature=0.8,
    top_p=0.95,
    top_k=40,
    repetition_penalty=1.1,
    n=1,
    max_tokens=256,
    logprobs=1,
    stop=None,
):
    """
    Generate completions with a loaded vLLM model.

    Args:
        model: A vLLM ``LLM`` instance (e.g. from ``load_vLLM_model``).
        input: Prompt string (or list of prompts) to generate from.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.
        top_k: Top-k sampling cutoff.
        repetition_penalty: Penalty applied to repeated tokens.
        n: Number of completions per prompt.
        max_tokens: Maximum new tokens per completion.
        logprobs: Number of logprobs to return per token.
        stop: Optional list of stop strings. Defaults to no stop strings.
            (Fixed: the original used a mutable default ``stop=[]``.)

    Returns:
        The list of ``RequestOutput`` objects from ``model.generate``.
    """
    # Avoid the shared-mutable-default pitfall: normalize None to a fresh
    # empty list so behavior matches the original signature.
    if stop is None:
        stop = []

    sampling_params = SamplingParams(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        n=n,
        logprobs=logprobs,
        max_tokens=max_tokens,
        stop=stop,
    )

    output = model.generate(input, sampling_params, use_tqdm=False)
    return output


if __name__ == "__main__":
    model_ckpt = "mistralai/Mistral-7B-v0.1"
    tokenizer, model = load_vLLM_model(model_ckpt, seed=42, tensor_parallel_size=1, half_precision=False)
    input = "What is the meaning of life?"
    output = generate_with_vLLM_model(model, input)
    breakpoint()
    print(output[0].outputs[0].text)
