# Copy/paste the contents into a new file named demo.py
import torch
from ipex_llm.transformers import AutoModelForCausalLM
from transformers import GenerationConfig
from modelscope import AutoTokenizer
# Path to the locally downloaded Qwen2.5-3B-Instruct checkpoint.
# Hoisted into a single constant so the tokenizer and the model always load
# from the same location (previously duplicated as two raw-string literals).
MODEL_PATH = r"D:\code\BERT-NER-Pytorch\wanderlust_companion\models\Qwen\Qwen2___5-3B-Instruct"

# Enable the KV cache so generation reuses past key/values instead of
# recomputing attention over the whole prefix at every decoding step.
generation_config = GenerationConfig(use_cache=True)

print('Now start loading Tokenizer and optimizing Model...')
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH,
                                          trust_remote_code=True)

# Load the model with ipex-llm 4-bit weight quantization (load_in_4bit=True);
# keep the embedding table on CPU (cpu_embedding=True) to save GPU memory,
# then move the rest of the model to the Intel XPU device.
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH,
                                             load_in_4bit=True,
                                             cpu_embedding=True,
                                             trust_remote_code=True,
                                             model_hub='modelscope')
model = model.to('xpu')
print('Successfully loaded Tokenizer and optimized Model!')

# Build the chat prompt.
# The prompt layout follows the Qwen2 chat template; tune it for your own
# model if needed — see https://huggingface.co/Qwen/Qwen2-1.5B-Instruct#quickstart
user_question = "What is AI?"
chat_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": user_question},
]
# Render the messages into a single prompt string (tokenization happens later),
# appending the assistant-turn marker so the model continues as the assistant.
text = tokenizer.apply_chat_template(chat_messages,
                                     tokenize=False,
                                     add_generation_prompt=True)

# Generate predicted tokens.
# torch.inference_mode() disables autograd tracking entirely — lower overhead
# than no_grad() for pure inference.
with torch.inference_mode():
    input_ids = tokenizer.encode(text, return_tensors="pt").to('xpu')
    print('--------------------------------------Note-----------------------------------------')
    print('| For the first time that each model runs on Intel iGPU/Intel Arc™ A300-Series or |')
    print('| Pro A60, it may take several minutes for GPU kernels to compile and initialize. |')
    print('| Please be patient until it finishes warm-up...                                  |')
    print('-----------------------------------------------------------------------------------')

    # One-time warm-up: the first generate() on an Intel GPU triggers kernel
    # compilation/initialization, so its latency is not representative.
    # If you're developing an application, fold this warm-up into your
    # start-up or loading routine to enhance the user experience.
    # The result is intentionally discarded (no assignment needed).
    model.generate(input_ids,
                   do_sample=False,
                   max_new_tokens=32,
                   generation_config=generation_config)  # warm-up

    print('Successfully finished warm-up, now start generation...')

    # Actual generation: greedy decoding (do_sample=False) for up to 32 new
    # tokens; move the result to CPU before decoding.
    output = model.generate(input_ids,
                            do_sample=False,
                            max_new_tokens=32,
                            generation_config=generation_config).cpu()
    # skip_special_tokens=False keeps the chat-template markers
    # (e.g. <|im_start|>) visible in the printed output.
    output_str = tokenizer.decode(output[0], skip_special_tokens=False)
    print(output_str)