import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from gxl_ai_utils.utils import utils_file
import os
# Path to a local Qwen2.5-3B-Instruct checkpoint (NFS mount).
model_path = "/home/work_nfs9/sywang/ckpt/Qwen2.5-3B-Instruct"
# Load the causal LM in bfloat16; trust_remote_code permits any custom
# modeling code shipped with the checkpoint.
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True,torch_dtype=torch.bfloat16,)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True,)
# Pin the model to GPU 7 — assumes that device exists on this host.
device = torch.device("cuda:7")
model.to(device)
print(model)

def chat(input_q_text):
    """Run one chat turn through the model and return the decoded reply.

    Args:
        input_q_text: The user's question as a plain string.

    Returns:
        The assistant's response text with special tokens stripped.
    """
    messages = [
        {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
        {"role": "user", "content": input_q_text}
    ]
    # Render the conversation with the model's chat template; the
    # generation prompt marks where the assistant's turn begins.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    print(f'text repr: {repr(text)}')
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    print(f'model_inputs: {model_inputs.input_ids}')

    # Pass the attention mask explicitly: calling generate() with only
    # input_ids makes the model infer the mask from pad tokens, which
    # emits a warning and can be wrong when pad == eos (as in Qwen).
    generated_ids = model.generate(
        model_inputs.input_ids,
        attention_mask=model_inputs.attention_mask,
        max_new_tokens=512
    )
    # Drop the prompt tokens so only the newly generated tokens remain.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    print(f'generated_ids: {generated_ids}')

    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response

# Inspect the output projection. lm_head.weight has shape
# (vocab_size, hidden_size): dim 0 is the vocabulary size, dim 1 the
# hidden dimension. (The original code printed shape[1] labeled as the
# vocab size, which is actually the hidden dimension.)
hidden_size = model.lm_head.weight.shape[1]
print(f"隐藏层维度: {hidden_size}")
vocab_size = model.lm_head.weight.shape[0]
print(f"词表大小: {vocab_size}")

# ID of the end-of-sequence token configured on the tokenizer.
eos_token_id = tokenizer.eos_token_id
print(f"EOS的ID: {eos_token_id}")

# Round-trip: text -> token id(s). ("token_ids" avoids shadowing the
# builtin `id`.)
text = "<|endoftext|>"
token_ids = tokenizer([text], return_tensors="pt").input_ids.to(device)
print(f"text: {text},id: {token_ids}")

# Round-trip: id -> text for two hard-coded ids — presumably Qwen
# special tokens; the decoded output below confirms what they are.
token_id = 151643
text = tokenizer.decode(token_id)
print(f"id: {token_id},text: {text}")
token_id = 151645
text = tokenizer.decode(token_id)
print(f"id: {token_id},text: {text}")

# Smoke-test one chat turn.
input_q_text = "中国和日本的关系是什么？"
response = chat(input_q_text)
print("Assistant:", response)

# Persist the lm_head parameters to disk, then reload them into a
# freshly constructed Linear layer to verify the save/load round trip.
lm_head = model.lm_head
print(f"lm_head: {lm_head}")
parm_path = "./lm_head.pt"
torch.save(lm_head.state_dict(), parm_path)
print(f"lm_head.weight: {lm_head.weight}")
# Derive the layer dimensions from the live module instead of
# hard-coding (2048, 151936), so this works for any checkpoint size.
new_lm_head = torch.nn.Linear(lm_head.in_features, lm_head.out_features, bias=False)
new_lm_head.load_state_dict(torch.load(parm_path))
print(f"new_lm_head.weight: {new_lm_head.weight}")

