import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from gxl_ai_utils.utils import utils_file
import os
# os.environ['HF_ENDPOINT']="https://hf-mirror.com"  # 在命令行里面加入就可以了，别的代码完全不用动
# export HF_ENDPOINT=https://hf-mirror.com
#export HF_HOME=/mnt/sfs/asr/ckpt
#export TRANSFORMERS_CACHE=/mnt/sfs/asr/ckpt

# Load the tokenizer and model; cache_dir is baked into the snapshot path below.
# Previously-used local snapshot paths, kept for reference:
# model_path = "/home/work_nfs15/asr_data/ckpt/Phi-3.5-mini-instruct/models--microsoft--Phi-3.5-mini-instruct/snapshots/af0dfb8029e8a74545d0736d30cb6b58d2f0f3f0"
# model_path ="/mnt/sfs/.cache/huggingface/hub/models--Qwen--Qwen2-7B/snapshots/453ed1575b739b5b03ce3758b23befdb0967f40e"
# model_path = "/mnt/sfs/asr/env/.cache/transformers/models--Qwen--Qwen2.5-7B-Instruct-1M/models--Qwen--Qwen2.5-7B-Instruct-1M/snapshots/e28526f7bb80e2a9c8af03b831a9af3812f18fba"
model_path = "/mnt/sfs/asr/env/.cache/transformers/models--Qwen--Qwen2.5-3B-Instruct/snapshots/aa8e72537993ba99e69dfaafa59ed015b17504d1"
# Observed token IDs (from the exploration code below):
# Qwen2.5-7B-Instruct: bos=151643, eos=151645, vocab size 152064
# text: <|endoftext|>, id: tensor([[151643]], device='npu:6')
# id: 151643 -> <|endoftext|>
# id: 151645 -> <|im_end|>
# Qwen2.5-3B-Instruct: bos=151643, eos=151645, vocab size 151936
# text: <|endoftext|>, id: tensor([[151643]], device='npu:6')
# id: 151643 -> <|endoftext|>
# id: 151645 -> <|im_end|>
# model_path = "Qwen/Qwen2.5-3B-Instruct"  # hub ID alternative to the local snapshot
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True,torch_dtype=torch.bfloat16,)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True,)
device = torch.device("npu:6")  # Ascend NPU device; requires torch_npu in the environment
model.to(device)
print(model)

def chat(
    input_q_text,
    system_prompt="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
    max_new_tokens=512,
):
    """Run one chat turn through the model and return the decoded reply.

    Args:
        input_q_text: The user's message.
        system_prompt: System message prepended to the conversation
            (defaults to the standard Qwen identity prompt).
        max_new_tokens: Generation length cap.

    Returns:
        The assistant's response text with special tokens stripped.
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": input_q_text},
    ]
    # Render the chat template as plain text; add_generation_prompt appends
    # the assistant header so the model starts generating the reply.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    print(f'text repr: {repr(text)}')
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    print(f'model_inputs: {model_inputs.input_ids}')

    generated_ids = model.generate(
        model_inputs.input_ids,
        # Pass the attention mask explicitly: Qwen's pad token equals its eos
        # token, so generate() cannot infer the mask reliably on its own.
        attention_mask=model_inputs.attention_mask,
        max_new_tokens=max_new_tokens,
    )
    # Strip the prompt tokens so only the newly generated continuation remains.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    print(f'generated_ids: {generated_ids}')

    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response

# --- Vocabulary / special-token exploration (debug prints) ---
# NOTE(review): lm_head.weight has shape (vocab_size, hidden_size), so
# shape[1] printed first is actually the hidden dimension, not the vocab
# size; shape[0] on the next print is the true vocab size.
vocab_size = model.lm_head.weight.shape[1]
print(f"词表大小: {vocab_size}")
vocab_size = model.lm_head.weight.shape[0]
print(f"词表大小: {vocab_size}")

# EOS token ID as reported by the tokenizer.
eos_token_id = tokenizer.eos_token_id
print(f"EOS的ID: {eos_token_id}")

# Round-trip: encode the endoftext marker to its ID tensor.
sample_text = "<|endoftext|>"
# Renamed from `id`/`text` to avoid shadowing the builtin `id()`.
token_ids = tokenizer([sample_text], return_tensors="pt").input_ids.to(device)
print(f"text: {sample_text},id: {token_ids}")

# Decode the two special-token IDs back to text.
token_id = 151643
decoded = tokenizer.decode(token_id)
print(f"id: {token_id},text: {decoded}")
token_id = 151645
decoded = tokenizer.decode(token_id)
print(f"id: {token_id},text: {decoded}")

# Interactive REPL: read a user turn, generate, print the reply.
# Exits cleanly on Ctrl-C or end-of-input (Ctrl-D / exhausted pipe),
# instead of dying with a KeyboardInterrupt/EOFError traceback.
while True:
    try:
        input_q_text = input("User: ")
    except (EOFError, KeyboardInterrupt):
        print()  # finish the prompt line before exiting
        break
    response = chat(input_q_text)
    print("Assistant:", response)