import os
import torch
import asyncio

from   vllm import SamplingParams

from   vllm.engine.async_llm_engine import AsyncLLMEngine
from   vllm.engine.arg_utils        import AsyncEngineArgs

# Force the legacy v0 engine path. NOTE(review): this is set AFTER
# `import vllm` above — some vLLM versions read VLLM_USE_V1 at import
# time, so confirm the installed version picks this up, or move the
# assignment before the vllm imports.
os.environ["VLLM_USE_V1"]  = "0"


# Prompt submitted to the engine (trailing space is intentional input text).
prompt = "很高兴见到你, " 
# Local path to the model weights loaded by AsyncEngineArgs below.
model  = "/data/yangxianpku/models/Qwen/Qwen2.5-7B-Instruct"

async def main():
    """Run one streaming generation request against an async vLLM engine.

    Builds an ``AsyncLLMEngine`` with tensor parallelism (TP=2) and pipeline
    parallelism (PP=2), submits a single prompt, drains the streamed partial
    results, then prints the final generated text and the engine's actual
    parallelism configuration.
    """
    # NOTE: pipeline parallelism requires the async engine (the sync LLM
    # entry point does not support PP).
    engine_args = AsyncEngineArgs(
        model=model,
        tensor_parallel_size=2,
        pipeline_parallel_size=2,
        trust_remote_code=True,
        dtype=torch.bfloat16,
        gpu_memory_utilization=0.8,
        max_model_len=4096,
        disable_log_stats=False,
    )

    # Create the asynchronous engine.
    async_engine = AsyncLLMEngine.from_engine_args(engine_args)

    # Greedy decoding (temperature=0); top_p is inert at temperature 0 but
    # kept so it is easy to re-enable sampling by raising the temperature.
    sampling_params = SamplingParams(
        temperature=0,
        top_p=0.95,
        max_tokens=100,
    )

    # Submit the request. generate() is an async generator that yields
    # cumulative RequestOutput objects as tokens arrive.
    results_generator = async_engine.generate(
        prompt,
        sampling_params,
        "001",  # request_id — must be unique per in-flight request
    )

    # FIX: the original printed output.outputs[0].text on every yielded
    # update; since the text is cumulative, that re-prints the growing
    # string each step. Keep only the last update and print it once.
    final_output = None
    async for output in results_generator:
        final_output = output
    if final_output is not None:
        print(f"Generated text: {final_output.outputs[0].text}")

    # Report the parallelism configuration the engine actually built.
    print("=== 引擎配置 ===")
    print(f"TP size: {async_engine.engine.parallel_config.tensor_parallel_size}")
    print(f"PP size: {async_engine.engine.parallel_config.pipeline_parallel_size}")

# Script entry point: drive the async main() coroutine to completion.
if __name__ == "__main__":
    asyncio.run(main())