import asyncio
import time
from vllm import AsyncLLMEngine, AsyncEngineArgs, SamplingParams
from transformers import AutoModelForCausalLM, AutoTokenizer
import re
import readline

# Candidate model paths used during development. Each assignment overwrites
# the previous one, so ONLY the LAST path below takes effect; the others are
# kept as a quick way to switch between models by reordering lines.
path = "/data/mccxadmin/zhaojiang.xue/models/Qwen2.5-7B-Instruct-GPTQ-Int4-cvt-tp1"
path = "/data/mccxadmin/zhaojiang.xue/models/Qwen2-7B-Instruct-cvt-tp1"
path = "/data/mccxadmin/zhaojiang.xue/NeuroTrim/examples/quantization/qwen2_7b_inst_group_cvt_tp1"
path = "/home/mt/zhaojiang.xue/Qwen2.5-1.5B-Instruct-cvt-tp1"
path = "/home/mt/zhaojiang.xue/qwen2_7b_inst_group_cvt_tp1"

# Async engine configuration: single-sequence serving on a MUSA device with
# eager execution (no CUDA graphs), prefix caching on, and both the prompt
# and the batch budget capped at 2048 tokens.
engine_args = AsyncEngineArgs(model=path,
                              gpu_memory_utilization=0.45,
                              enforce_eager=True,
                              disable_log_stats=True,
                              disable_log_requests=True,
                              enable_prefix_caching=True,
                              device="musa",
                              block_size=64,
                              max_num_seqs=1,
                              max_model_len=2048,
                              max_num_batched_tokens=2048)
# initialize engine and request arguments

# Tokenizer is loaded separately (HF) to apply the chat template client-side;
# the vLLM engine handles generation. Greedy decoding (temperature=0.0).
tokenizer = AutoTokenizer.from_pretrained(path)
model = AsyncLLMEngine.from_engine_args(engine_args)
sampling_param = SamplingParams(temperature=0.0, max_tokens=2048)


def split_string_by_multiple_keywords(string, keywords):
  """Split *string* on any of the given keyword separators.

  Args:
    string: the text to split.
    keywords: iterable of literal separator strings.

  Returns:
    List of the non-empty fragments left after removing every separator.
  """
  # Escape each keyword so regex metacharacters ('.', '+', '|', ...) are
  # treated literally, then combine into a single alternation pattern.
  pattern = '|'.join(re.escape(keyword) for keyword in keywords)
  # Split on every separator occurrence and drop the empty fragments that
  # appear around leading/adjacent separators.
  return [part for part in re.split(pattern, string) if part]


def build_chat_template(prompt):
  """Convert a raw 'system: ... user: ...' line into a chat-template string.

  The last fragment becomes the user message; when exactly two fragments are
  present, the first becomes the system message. Missing pieces fall back to
  built-in defaults.
  """
  fragments = split_string_by_multiple_keywords(prompt, ["system: ", "user: "])
  # print(fragments)
  user_text = fragments[-1] if fragments else "Give me a short introduction to large language model."
  system_text = fragments[0] if len(fragments) == 2 else "You are a helpful assistant."
  conversation = [
      {"role": "system", "content": system_text},
      {"role": "user", "content": user_text},
  ]
  return tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)


async def generate_streaming():
  """Interactive REPL: read a prompt, stream the model's reply, print speed.

  Loops until the user types 'stop'. After each reply, prints decode
  throughput in tokens/sec measured from the first streamed chunk.
  """
  while True:
    # NOTE: input() blocks the event loop; acceptable here because this is
    # the only coroutine running.
    prompt = input(">>>")
    if prompt == 'stop':
      break
    chat_text = build_chat_template(prompt)
    # vLLM's generate() expects request_id to be a str; a monotonic timestamp
    # keeps ids unique within this process.
    results_generator = model.generate(chat_text, sampling_param,
                                       request_id=str(time.monotonic()))
    previous_text = ""
    first_token_time = 0.0
    request_output = None  # guards against an empty stream below
    async for request_output in results_generator:
      if first_token_time == 0.0:
        first_token_time = time.time()
      out_text = request_output.outputs[0].text
      # Print only the newly generated suffix so the reply streams in place.
      print(out_text[len(previous_text):], end='', flush=True)
      previous_text = out_text
    end_time = time.time()
    print("\n")
    # Skip the stats line if nothing was generated; avoid dividing by a zero
    # interval when everything arrived in a single step.
    if request_output is not None:
      elapsed = end_time - first_token_time
      num_tokens = len(request_output.outputs[0].token_ids)
      tps = num_tokens / elapsed if elapsed > 0 else float('inf')
      print("###### ", tps, " tps")


# Script entry point: run the interactive loop until the user types 'stop'
# or the task gets cancelled (e.g. Ctrl-C during engine shutdown).
try:
  asyncio.run(generate_streaming())
except asyncio.exceptions.CancelledError:
  print('canceled!')
# Example prompts to try:
# system: You are a helpful assistant. user: Give me a short introduction to large language model.
# (Chinese) Tell me a story about a young person who works hard at a startup and finally succeeds.
# (Chinese) Give an introduction to Trump.