import argparse
from mixserve import OfflineDriverLLM, SamplingParams,DriverEngine
from mixserve.config import (
    ModelConfig,
    DisaggParallelConfig,
    ParallelConfig,
    CacheConfig,
    ContextStageSchedConfig,
    DecodingStageSchedConfig
)
import asyncio

# Command-line options for the offline generation demo.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--model',
    type=str,
    default='/data0/model/Qwen3-8B/Qwen3_8B-bin/',
    help='The model to use',
)
args = parser.parse_args()

async def main():
    """Run one offline batch-generation pass and print how many outputs came back."""
    # 50 identical long prompts: a short phrase repeated 300 times each.
    base_prompt = "To be or not to be" * 300
    prompts = [base_prompt for _ in range(50)]

    # Sampling configuration shared by every request.
    sampling_params = SamplingParams(
        temperature=0.8,
        top_p=0.95,
        max_tokens=200,
        stop=["\n"],
        ignore_eos=True,
    )

    # Single-device (TP=1, PP=1) layout for both the context and decoding stages.
    model_config = ModelConfig(
        model=args.model,
        tokenizer=None,
    )
    disagg_parallel_config = DisaggParallelConfig(
        context=ParallelConfig(
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
        ),
        decoding=ParallelConfig(
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
        ),
    )
    cache_config = CacheConfig(
        block_size=16,
        max_num_blocks_per_req=256,
        gpu_memory_utilization=0.9,
        cpu_swap_space=1.0,
    )
    context_sched_config = ContextStageSchedConfig(
        policy="fcfs",
        max_batch_size=64,
        max_tokens_per_batch=2048 * 64,
    )
    decoding_sched_config = DecodingStageSchedConfig(
        policy="fcfs",
        max_batch_size=64,
        max_tokens_per_batch=2048 * 64,
    )

    llm = OfflineDriverLLM(
        model_config=model_config,
        disagg_parallel_config=disagg_parallel_config,
        cache_config=cache_config,
        context_sched_config=context_sched_config,
        decoding_sched_config=decoding_sched_config,
        profile_first=False,
    )
    await llm.async_init()

    outputs = await llm.generate(
        prompts=prompts,
        sampling_params=sampling_params,
    )
    # Brief pause before reporting — presumably lets background engine tasks
    # settle; TODO confirm whether this is still needed.
    await asyncio.sleep(1)
    print(len(outputs))

if __name__ == "__main__":
    # Script entry point: drive the async demo to completion on a fresh event loop.
    asyncio.run(main())