import time

import torch
from ipex_llm.transformers import AutoModelForCausalLM
from modelscope import AutoTokenizer, snapshot_download

class Qwen2ChatModel:
    """Chat wrapper around a Qwen2.5 instruct model accelerated with ipex-llm.

    Downloads the weights via ModelScope, loads them 4-bit quantized, and
    moves the model to Intel XPU when available, falling back to CPU.
    """

    def __init__(self, model_name='Qwen/Qwen2.5-7B-Instruct',
                 local_dir=r'C:\models\Qwen\Qwen2.5-7B-Instruct',
                 max_new_tokens=2048):
        """Download (if needed) and load the model and tokenizer.

        Args:
            model_name: ModelScope model identifier to fetch.
            local_dir: Local snapshot directory. Raw string: the Windows
                backslashes must not be parsed as escape sequences.
            max_new_tokens: Cap passed to ``model.generate`` in :meth:`chat`.
        """
        self.model_dir = snapshot_download(model_name, local_dir=local_dir)
        self.model = AutoModelForCausalLM.from_pretrained(self.model_dir,
                                                          load_in_4bit=True,
                                                          optimize_model=True,
                                                          trust_remote_code=True,
                                                          use_cache=True)
        # Prefer Intel XPU when present; keep a CPU fallback so the class
        # still works on machines without an XPU device.
        self.device = torch.device("xpu") if torch.xpu.is_available() else torch.device("cpu")
        self.model = self.model.half().to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_dir, trust_remote_code=True)
        self.max_new_tokens = max_new_tokens

    def chat(self, prompt):
        """Generate an assistant reply for ``prompt`` and return it as text.

        Applies the model's chat template with a fixed system prompt, runs
        generation, and prints the wall-clock inference time.

        Args:
            prompt: The user's message.

        Returns:
            The decoded assistant response (prompt tokens stripped).
        """
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ]
        text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        # Use the device selected in __init__ rather than a hard-coded "xpu"
        # so inference also works on the CPU fallback path.
        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.device)

        st = time.time()
        generated_ids = self.model.generate(model_inputs.input_ids, max_new_tokens=self.max_new_tokens)
        # XPU work is launched asynchronously; synchronize before reading the
        # clock so the timing is meaningful. Skip on CPU, where the call
        # would fail.
        if self.device.type == "xpu":
            torch.xpu.synchronize()
        end = time.time()

        generated_ids = generated_ids.cpu()
        # Strip the prompt tokens so only newly generated text is decoded.
        generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in
                         zip(model_inputs.input_ids, generated_ids)]
        response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

        print(f'Inference time: {end - st} s')
        return response
