import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from gxl_ai_utils.utils import utils_file
import os
# os.environ['HF_ENDPOINT']="https://hf-mirror.com"  # 在命令行里面加入就可以了，别的代码完全不用动
# export HF_ENDPOINT=https://hf-mirror.com
#export HF_HOME=/mnt/sfs/asr/ckpt
#export TRANSFORMERS_CACHE=/mnt/sfs/asr/ckpt

# Load the tokenizer and causal-LM weights from a local snapshot directory
# (a pre-populated HF cache), so no hub download happens at runtime.
# Previously used checkpoints, kept for quick switching:
# model_path = "/home/work_nfs15/asr_data/ckpt/Phi-3.5-mini-instruct/models--microsoft--Phi-3.5-mini-instruct/snapshots/af0dfb8029e8a74545d0736d30cb6b58d2f0f3f0"
# model_path ="/mnt/sfs/.cache/huggingface/hub/models--Qwen--Qwen2-7B/snapshots/453ed1575b739b5b03ce3758b23befdb0967f40e"
# model_path = "/mnt/sfs/asr/env/.cache/transformers/models--Qwen--Qwen2.5-7B-Instruct-1M/models--Qwen--Qwen2.5-7B-Instruct-1M/snapshots/e28526f7bb80e2a9c8af03b831a9af3812f18fba"
model_path = "/mnt/sfs/asr/env/.cache/transformers/models--Qwen--Qwen2.5-3B-Instruct/snapshots/aa8e72537993ba99e69dfaafa59ed015b17504d1"
# Author's token-id notes:
#   Qwen2.5-7B-Instruct: bos=151643, eos=151645, vocab size 152064
#   Qwen2.5-3B-Instruct: bos=151643, eos=151645, vocab size 151936
# model_path = "Qwen/Qwen2.5-3B-Instruct"
tokenizer_base = AutoTokenizer.from_pretrained(
    model_path,
    trust_remote_code=True,
)
model_base = AutoModelForCausalLM.from_pretrained(
    model_path,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
)
# NOTE(review): hard-coded Ascend NPU device index — confirm "npu:5" exists
# on the target host (requires torch_npu to be importable).
device = torch.device("npu:5")
model_base.to(device)

def chat(input_q_text):
    """Run one chat turn against the (LoRA-wrapped) module-level model.

    Bug fix: the original body referenced an undefined name ``tokenizer``
    (the tokenizer loaded above is bound to ``tokenizer_base``), so any call
    raised ``NameError``. All tokenizer uses now go through ``tokenizer_base``.

    Args:
        input_q_text: the user's message (plain string).

    Returns:
        The assistant's decoded reply (special tokens stripped).

    NOTE(review): relies on the module-level ``model`` created by the LoRA
    section below, so this must be called after that section runs.
    """
    messages = [
        {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
        {"role": "user", "content": input_q_text},
    ]
    # Render the chat template to a plain prompt string (no tokenization yet),
    # appending the generation prompt so the model continues as the assistant.
    text = tokenizer_base.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    # print(f'text: {text}')
    print(f'text repr: {repr(text)}')
    model_inputs = tokenizer_base([text], return_tensors="pt").to(device)
    print(f'model_inputs: {model_inputs.input_ids}')

    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=512
    )
    # Strip the prompt tokens from each sequence so only the newly generated
    # continuation remains.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    print(f'generated_ids: {generated_ids}')

    response = tokenizer_base.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response

# # Get the vocabulary size
# vocab_size = model.lm_head.weight.shape[0]
# print(f"vocab size: {vocab_size}")
#
# # Get the EOS token id
# eos_token_id = tokenizer.eos_token_id
# print(f"EOS id: {eos_token_id}")
from peft import LoraConfig, TaskType, get_peft_model
utils_file.logging_limit_print("耿雪龙： 使用lora了")
# Which linear sub-modules receive LoRA adapters.
# target_modules = ['w_pack', 'o_proj', 'gate_proj', 'down_proj']
target_modules = ['q_proj', 'k_proj', 'v_proj', 'o_proj', 'gate_proj', 'down_proj']


def _build_and_inspect_lora(rank):
    """Wrap the module-level base model with a LoRA adapter of rank ``rank``,
    print the wrapped model, every parameter's shape/trainability, and its
    size summary; return the wrapped model.

    Extracted to remove the verbatim duplication between the initial r=8 setup
    and the interactive rank-exploration loop below.

    NOTE(review): each call applies ``get_peft_model`` to the SAME
    ``model_base`` instance; verify that repeated wrapping of one base model
    is intended (peft mutates the base by injecting adapter layers).
    """
    peft_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        inference_mode=False,
        r=rank,
        lora_alpha=32,
        lora_dropout=0.1,
        target_modules=target_modules,
    )
    lora_model = get_peft_model(model_base, peft_config)
    print(lora_model)
    for name, param in lora_model.named_parameters():
        print(name, param.shape, param.requires_grad)
    utils_file.print_model_size(lora_model)
    return lora_model


# Initial wrap with the default rank; `model` is what chat() generates with.
model = _build_and_inspect_lora(8)
# while True:
#     input_q_text = input("User: ")
#     response = chat(input_q_text)
#     print("Assistant:", response)

# Interactive exploration: re-wrap with a user-chosen rank and print the
# resulting parameter breakdown each time.
while True:
    rank = input("请输入rank：")
    try:
        rank = int(rank)
    except ValueError:
        # A typo should not kill the interactive session.
        print(f"invalid rank: {rank!r}, please enter an integer")
        continue
    model = _build_and_inspect_lora(rank)


