# benchmark.py
import time
import asyncio

from simple_vllm.simple_engine import SimpleLLMEngine


async def benchmark_simple_engine():
    """Benchmark SimpleLLMEngine throughput over a small batch of prompts.

    Submits four prompts, steps the engine until every request finishes,
    then prints total wall time, throughput (requests/s), and a preview of
    each completion.
    """
    engine = SimpleLLMEngine("gpt2", max_batch_size=4)

    # Test prompts — one request each.
    prompts = [
        "The quick brown fox",
        "Hello, how are you?",
        "What is the meaning of life?",
        "In a hole in the ground there lived a hobbit"
    ]

    # Submit all requests up front so the engine can batch them together.
    request_ids = [engine.add_request(prompt, max_tokens=50) for prompt in prompts]

    # Run inference to completion. perf_counter() is monotonic and has
    # higher resolution than time.time(), making it the correct clock for
    # measuring elapsed durations (time.time() can jump on NTP adjustments).
    start_time = time.perf_counter()
    results = {}

    while engine.has_unfinished_requests():
        step_results = engine.step()
        results.update(step_results)

    elapsed = time.perf_counter() - start_time

    # Report timing; guard against division by zero on a degenerate
    # zero-length run (e.g. engine finishes instantly or has no work).
    print(f"Total time: {elapsed:.2f}s")
    if elapsed > 0:
        print(f"Throughput: {len(prompts) / elapsed:.2f} requests/s")

    # Preview the first 100 characters of each completion.
    for req_id, result in results.items():
        print(f"Request {req_id}: {result[:100]}...")


# Script entry point: run the async benchmark on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(benchmark_simple_engine())