import asyncio
import time
from vllm import AsyncLLMEngine, AsyncEngineArgs, SamplingParams

# Llama-2-7b-chat checkpoint, pre-converted to fp16 for the MUSA backend.
path = "/data/mtt/model_convert/llama-2-7b-chat-hf-fp16-convert-new/"

# Engine configuration for the async vLLM engine (one kwarg per line).
# NOTE(review): download_dir points at a .so file rather than a directory —
# confirm this is intentional for the MT-Transformer backend.
engine_args = AsyncEngineArgs(
    model=path,
    gpu_memory_utilization=0.4,
    download_dir='/data/ljjia/MT-Transformer/build/src/mttransformer/th_op/libmt_transformer_pt.so',
    enforce_eager=True,
    disable_log_stats=True,
    enable_prefix_caching=True,
    device="musa",
    block_size=64,
    max_num_seqs=128,
)

# Build the engine and the sampling parameters shared by every request.
# (Smaller reference config kept for quick local testing:)
# engine_args = AsyncEngineArgs(model="facebook/opt-125m", enforce_eager=True)
model = AsyncLLMEngine.from_engine_args(engine_args)
sampling_param = SamplingParams(temperature=0.0, max_tokens=2048)


async def generate_streaming():
    """Interactive chat loop: read prompts from stdin and stream replies.

    Loops until the user types ``stop``.  Each turn appends the user's
    prompt and the model's reply to ``history_text``, and the whole
    accumulated conversation is re-submitted as the next request's prompt
    (the engine's prefix caching makes the repeated prefix cheap).

    Side effects: reads stdin, writes generated text to stdout, submits
    requests to the module-level ``model`` engine.
    """
    history_text = ""
    while True:
        # Run the blocking input() in a worker thread so the event loop
        # (and any in-flight engine work) is not stalled while we wait.
        prompt = await asyncio.to_thread(input, ">>>")
        if prompt == 'stop':
            break
        # Example prompts for manual testing:
        # [INST] Can you tell me 2 tourist attraction about Paris? [/INST]
        # [INST] Can you tell me something about #2? [/INST]
        # [INST] I am going to Paris, where should I go? [/INST]
        history_text += prompt
        # BUG FIX: AsyncLLMEngine.generate expects request_id to be a str;
        # time.monotonic() returns a float.  Stringify it to keep a unique,
        # monotonically increasing id per request.
        results_generator = model.generate(
            history_text, sampling_param, request_id=str(time.monotonic()))
        previous_text = ""
        print(f'Prompt:{prompt}\nGenerateText:\n')
        # Each request_output carries the full text generated so far, so
        # print only the newly produced suffix to get incremental streaming.
        async for request_output in results_generator:
            text = request_output.outputs[0].text
            print(text[len(previous_text):], end='', flush=True)
            previous_text = text
        print("\n")
        # Optionally separate turns with the tokenizer's EOS token:
        # previous_text = previous_text + model.engine.get_tokenizer().eos_token + "\n"
        history_text += previous_text
        print(f"!!!!!!!!!!!!!!!!!!!finish a dialog!!!!!!!!!!!!!!!!!!!!!!!\n{history_text}")



# Guard the entry point so importing this module for reuse does not
# immediately start the interactive stdin loop.
if __name__ == "__main__":
    asyncio.run(generate_streaming())
