from vllm import LLM, SamplingParams
import numpy as np

# Demo prompts. With Llama-3 tokenization each prompt becomes a short
# token-id sequence (BOS 128000 + word ids), e.g.:
#   "Hello, my name is" -> [128000, 9906, 11, 856, 836, 374]
#   "The life is"       -> [128000, 791, 2324, 374]
# Each token id then indexes a [1, hidden_dim] embedding row, so a prompt of
# k tokens enters the model as a [k, hidden_dim] matrix.
prompts = ["Hello, my name is", "The life is"]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=4)

# Path to a locally downloaded checkpoint. If the weights are not present
# yet, an HF model id also works, e.g. LLM(model="meta-llama/Llama-2-7b-hf").
# Other checkpoints tried during experimentation:
#   /home/yangxianpku/models/swift/Meta-Llama-3-8B-Instruct-AWQ
#   /home/yangxianpku/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
#   /home/yangxianpku/models/AI-ModelScope/e5-mistral-7b-instruct   (use task="embed")
model_path = "/home/yangxianpku/models/LLM-Research/Meta-Llama-3-8B-Instruct"

llm = LLM(
    model=model_path,
    max_model_len=8192,
    tensor_parallel_size=1,
    # Eager mode skips CUDA-graph capture, which makes it much easier to
    # step through / inspect the model's forward pass while debugging.
    enforce_eager=True,
)



# Marks the point where engine construction (and its internal warmup) is done.
print("===================================WARMUP结束===================================")

outputs = llm.generate(prompts, sampling_params)

# Version / build notes collected while getting this script running:
# - vLLM V1 engine: https://blog.vllm.ai/2025/01/27/v1-alpha-release.html
#   (enable with: export VLLM_USE_V1=1)
# - Editable source build: pip install --no-build-isolation -e . --verbose
#   (guide: https://learn.arm.com/learning-paths/servers-and-cloud-computing/vllm/vllm-setup/)
#   produces e.g. vllm-0.6.6-0.editable-cp310-cp310-linux_x86_64.whl
# - vllm 0.6.6 compiled under torch 2.5.1 then run with torch 2.4.0 works
#   (with VLLM_USE_V1=0); stock vllm==0.6.6 + torch 2.4.0 fails, while
#   vllm==0.6.3 + torch 2.4.0 works.
for result in outputs:
    # Each RequestOutput carries the original prompt plus one or more
    # completions; this script only samples one, so take candidate [0].
    completion = result.outputs[0].text
    print(f"Prompt: {result.prompt!r}, Generated text: {completion!r}")