from vllm import AsyncLLMEngine
from vllm.engine.arg_utils import AsyncEngineArgs
import os


class VLLMServer:
    """Thin async wrapper around a vLLM ``AsyncLLMEngine``.

    Intended to be used as a process-wide singleton via :meth:`get_instance`.
    """

    def __init__(self, model_path, gpu_memory_utilization=0.85):
        """Start an async vLLM engine for the given model.

        Args:
            model_path: Filesystem path (or HF model id) of the model to load.
            gpu_memory_utilization: Fraction of GPU memory vLLM may claim.
        """
        # Engine configuration tuned for a single consumer GPU (RTX 4060).
        engine_args = AsyncEngineArgs(
            model=model_path,
            tensor_parallel_size=1,  # single-GPU execution
            gpu_memory_utilization=gpu_memory_utilization,
            quantization="gptq",  # GPTQ 4-bit quantization
            max_model_len=4096,  # maximum context length
            enforce_eager=True,  # skip CUDA graph capture to reduce VRAM use
            disable_log_stats=True,  # silence periodic stats logging
        )

        # Initialize the engine (loads weights; may take a while).
        self.engine = AsyncLLMEngine.from_engine_args(engine_args)
        print(f"vLLM引擎已启动，模型: {model_path}")

    async def generate(self, prompt, max_tokens=512, temperature=0.2):
        """Generate a completion for ``prompt`` and return the stripped text.

        Args:
            prompt: The input prompt string.
            max_tokens: Maximum number of tokens to generate.
            temperature: Sampling temperature (low = more deterministic).

        Returns:
            The generated text with surrounding whitespace stripped.

        Raises:
            RuntimeError: If the engine produced no output for the request.
        """
        from vllm import SamplingParams
        import uuid

        # Sampling configuration for this request.
        sampling_params = SamplingParams(
            temperature=temperature,
            top_p=0.9,
            max_tokens=max_tokens,
            skip_special_tokens=True,
        )

        # Unique id so the engine can track/abort this request.
        request_id = f"req_{uuid.uuid4().hex}"

        # The engine streams incremental outputs; keep only the last (final) one.
        final_output = None
        async for output in self.engine.generate(prompt, sampling_params, request_id):
            final_output = output

        # Fix: the original dereferenced final_output unconditionally and
        # crashed with an opaque AttributeError when the generator yielded
        # nothing (e.g. request aborted). Fail with a clear error instead.
        if final_output is None or not final_output.outputs:
            raise RuntimeError(f"vLLM produced no output for request {request_id}")

        return final_output.outputs[0].text.strip()

    @classmethod
    def get_instance(cls):
        """Return the process-wide singleton, creating it on first use."""
        # Fix: check cls.__dict__ instead of hasattr() so that a subclass
        # creates its own instance rather than silently reusing the parent's.
        if "_instance" not in cls.__dict__:
            model_path = os.getenv("MODEL_PATH", "models/Qwen3-4B-unsloth-bnb-4bit.Q4_K_M.gguf")
            # NOTE(review): the default path looks like a GGUF file, but the
            # engine is configured with quantization="gptq" — confirm which
            # model format is actually intended.
            cls._instance = cls(model_path)
        return cls._instance