import os
import torch
import time
import pandas as pd
from ipex_llm.transformers import AutoModelForCausalLM
import transformers
from transformers import AutoTokenizer

def inference(model, tokenizer, prompt: str) -> float:
    """Run one chat-style generation and return its wall-clock time.

    Args:
        model: causal LM exposing a HuggingFace-style ``generate`` method.
        tokenizer: matching tokenizer; must support ``apply_chat_template``
            and ``batch_decode``.
        prompt: raw user question; a Chinese "answer as briefly as
            possible" instruction is prepended before templating.

    Returns:
        Seconds spent inside ``model.generate`` only (tokenization and
        decoding are excluded from the timing).
    """
    prompt = f"用最短的话问答问题：{prompt}"
    messages = [{"role": "user", "content": prompt}]

    with torch.inference_mode():
        text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        model_inputs = tokenizer([text], return_tensors="pt").to('cpu')

        # NOTE(review): `temperature` has no effect unless `do_sample=True`
        # is also passed; as written, generation is greedy — confirm intended.
        # perf_counter is the monotonic high-resolution clock meant for
        # elapsed-time measurement (time.time() can jump with clock changes).
        start = time.perf_counter()
        generated_ids = model.generate(
            model_inputs.input_ids,
            temperature=0.6,
            max_new_tokens=128)
        end = time.perf_counter()

        # Strip the prompt tokens so only newly generated tokens are decoded.
        generated_ids = [
            output_ids[len(input_ids):]
            for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

        # Report the response and its inference time.
        t = end - start
        print(f"{response}\nInference time: {t:.2f} s\n\n\n")

        return t

def _benchmark(model, tokenizer, queries) -> float:
    """Run `inference` over every query, printing per-query progress and a
    final total; return the cumulative generation time in seconds."""
    total = 0.0
    for i, query in enumerate(queries):
        print(f"Query: {query}")
        t = inference(model, tokenizer, query)
        total += t
        print(f"[{i}] {total:.2f}")
    print(f"\nTotal: {total}")
    return total


if __name__ == '__main__':
    NUM = 200

    # BUG FIX: setting OMP_NUM_THREADS *after* `import torch` has no effect —
    # the OpenMP runtime reads the variable when it initializes at import
    # time. Keep the env var for any child processes, but enforce the limit
    # through torch's runtime API as well.
    os.environ["OMP_NUM_THREADS"] = "8"
    torch.set_num_threads(8)

    # Sample NUM instructions from the Chinese CoT dataset as benchmark queries.
    df = pd.read_csv("./dataset/CoT_Chinese_data.csv")
    QList = df.sample(n=NUM)['instruction']
    print(len(QList))

    ### ipex model: int4 low-bit weights loaded through ipex-llm
    ipex_load_path = "./model/qwen2_int4"
    ipex_model = AutoModelForCausalLM.load_low_bit(ipex_load_path, trust_remote_code=True)
    ipex_tokenizer = AutoTokenizer.from_pretrained(ipex_load_path, trust_remote_code=True)
    ipex_total = _benchmark(ipex_model, ipex_tokenizer, QList)

    ### raw model: unquantized fp16 baseline via vanilla transformers
    raw_load_path = "./model/qwen2chat_src/Qwen/Qwen2-1___5B-Instruct"
    raw_model = transformers.AutoModelForCausalLM.from_pretrained(raw_load_path, torch_dtype=torch.float16, trust_remote_code=True)
    raw_tokenizer = AutoTokenizer.from_pretrained(raw_load_path, trust_remote_code=True)
    raw_total = _benchmark(raw_model, raw_tokenizer, QList)

    ## summary of both runs
    print(f"\nNum of queries: {NUM}\nIPEX Total: {ipex_total}\nRaw Total: {raw_total}")