import vllm
from vllm import LLM, SamplingParams
from transformers import AutoModelForCausalLM, AutoTokenizer
# Hugging Face model id this script targets (kept for reference only; the
# weights are actually loaded from the local `path` below).
model_name = "Qwen/Qwen2.5-7B-Instruct-GPTQ-Int4"

print("=====generate=====")

# Local converted checkpoint to load.  Earlier experiments are kept as
# comments so they can be swapped back in by uncommenting a single line
# (the original code rebound `path` repeatedly; only the last binding won).
# path = "/data/mccxadmin/zhaojiang.xue/models/Qwen2.5-7B-Instruct-GPTQ-Int4-cvt-tp1"
# path = "/data/mccxadmin/zhaojiang.xue/models/Qwen2-7B-Instruct-cvt-tp1"
# path = "/home/mt/zhaojiang.xue/Qwen2.5-1.5B-Instruct-cvt-tp1"
# path = "/home/mt/zhaojiang.xue/qwen2_7b_inst_group_cvt_tp1"
path = "/home/mt/zhaojiang.xue/qwen2_7b_dpo_group_cvt_tp1"


# export MTT_LIB_PATH=/data/mtt/MT-Transformer/build/src/mttransformer/th_op/libmt_transformer_pt.so
# Build the vLLM engine on the MUSA backend.  max_model_len and
# max_num_batched_tokens are kept equal so a single sequence can fill the
# whole context window; max_num_seqs=1 serializes requests (latency-style run).
llm = LLM(
    model=path,
    gpu_memory_utilization=0.4,  # leave headroom for other users of the card
    tensor_parallel_size=1,
    device="musa",
    block_size=64,
    max_num_seqs=1,
    max_model_len=2048,
    max_num_batched_tokens=2048,
    disable_log_stats=False,  # keep throughput/latency stats in the log
)
print("llm construct end")

tokenizer = AutoTokenizer.from_pretrained(path)

# Default system prompt shared by three of the four probe prompts.
_DEFAULT_SYSTEM = "You are a helpful assistant."


def _build_chat_prompt(tok, user_msg, system_msg=_DEFAULT_SYSTEM):
    """Render one (system, user) message pair through the model's chat template.

    Returns the untokenized prompt string with the generation prompt
    appended, ready to be passed to ``llm.generate``.
    """
    messages = [
        {"role": "system", "content": system_msg},
        {"role": "user", "content": user_msg},
    ]
    return tok.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )


# Four probe prompts: two English intro questions (differing only in the
# system prompt), one Chinese story request, and one classic riddle.
text0 = _build_chat_prompt(
    tokenizer, "Give me a short introduction to large language model.")
text1 = _build_chat_prompt(
    tokenizer,
    "Give me a short introduction to large language model.",
    system_msg="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
)
text2 = _build_chat_prompt(
    tokenizer, "给我讲一个年轻人奋斗创业最终取得成功的故事。")
text3 = _build_chat_prompt(
    tokenizer,
    "You're standing on the surface of the Earth. "
    "You walk one mile south, one mile west and one mile north. "
    "You end up exactly where you started. Where are you?",
)

sampling_params = SamplingParams(temperature=0, top_k = 1, top_p=0.95, max_tokens=1000)
for i in range(1):
  outputs = llm.generate([text0, text1, text2, text3], sampling_params)
  print(f"\n")
  for idx, output in enumerate(outputs):
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt {idx}: {prompt}, \nGenerated text: {generated_text}\n\n")
