import torch.nn
from peft import  LoraConfig, get_peft_model

from modelscope import AutoTokenizer, AutoModel, snapshot_download
# Local checkout of the ChatGLM3-6B weights (Windows drive). The snapshot
# download below refreshes the hub cache, but the model itself is loaded
# from this local path.
model_path = r"k:\models\chatglm3-6b"
# Download only the safetensors weights from the ModelScope hub.
# NOTE(review): the original call passed use_safetensors=True and
# device_map='auto' here — those are `from_pretrained` arguments, not
# `snapshot_download` arguments, and snapshot_download rejects unknown
# kwargs. `ignore_file_pattern` is the supported way to skip the
# non-safetensors (.bin) checkpoint files, matching the original intent.
model_dir = snapshot_download(
    "ZhipuAI/chatglm3-6b",
    ignore_file_pattern=[r"\.bin$"],  # skip pytorch .bin weight shards
)
# trust_remote_code is required: ChatGLM3 ships custom modeling code.
# .half() casts to fp16 before moving the model to the GPU.
model = AutoModel.from_pretrained(
    model_path, trust_remote_code=True, use_safetensors=True
).half().cuda()

def print_trainable_parameters(model):
    """
    Print the trainable parameter count, the total parameter count,
    and the trainable percentage of *model*.
    """
    total = 0
    trainable = 0
    for param in model.parameters():
        count = param.numel()
        total += count
        if param.requires_grad:
            trainable += count
    print(
        f"trainable params: {trainable} || "
        f"all params: {total} || "
        f"trainable: {100 * trainable / total}%"
    )

def find_all_target_names(model, target_moude=torch.nn.Linear):
    """Return the (deduplicated) leaf names of all submodules of the given type.

    Used to build the ``target_modules`` list for a :class:`LoraConfig`.
    ``lm_head`` is always excluded — it must stay untouched for 16-bit
    training.

    NOTE(review): the parameter name ``target_moude`` is a typo but is kept
    so any keyword callers remain compatible.

    Args:
        model: the model whose submodules are scanned via ``named_modules``.
        target_moude: the module class to match (default ``torch.nn.Linear``).

    Returns:
        list[str]: unique last-path-component names of matching submodules.
    """
    lora_module_names = set()
    for name, module in model.named_modules():
        # isinstance (rather than an exact type() comparison) also matches
        # subclasses, e.g. quantized Linear variants.
        if isinstance(module, target_moude):
            parts = name.split('.')
            lora_module_names.add(parts[0] if len(parts) == 1 else parts[-1])

    # needed for 16-bit: never wrap the output head with LoRA
    lora_module_names.discard("lm_head")

    return list(lora_module_names)


# LoRA adapter configuration: inject rank-64 adapters into the fused
# query/key/value projection of every ChatGLM3 attention block.
lora_config = LoraConfig(
    r=64,  # rank of the low-rank decomposition
    lora_alpha=16,  # scaling factor (effective scale is alpha / r)
    #target_modules=find_all_target_names(model),     # explicitly tells LoRA where to apply (all Linear layers)
    target_modules=["query_key_value"],
    #target_modules=["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"],
    lora_dropout=0.05,
    bias="none",  # do not train any bias terms
    task_type="CAUSAL_LM",  # SEQ_2_SEQ_LM would be for encoder-decoder models
)


# Wrap the base model: only the injected adapter weights are trainable.
model = get_peft_model(model, lora_config)
print_trainable_parameters(model)

# save_pretrained writes the adapter weights and config here.
# NOTE(review): despite the ".pth" suffix this is a directory, not a
# single checkpoint file — confirm downstream loading expects a directory.
model.save_pretrained("./lora_saver/lora_query_key_value.pth")


