
import os

from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from peft import PeftModel
from peft import LoraConfig, TaskType, get_peft_model

# Just run this script directly: it loads the base model together with the LoRA
# checkpoint weights, merges them, and saves the result as a plain transformers
# model with the LoRA fine-tuned parameters folded in.


# Directory where the merged (base + LoRA) model will be saved.
lora_path = "./lora/Qwen2-1___5B-Instruct"

# Load the tokenizer from the original base-model download path.
tokenizer = AutoTokenizer.from_pretrained(
    "./qwen/Qwen2-1___5B-Instruct/", use_fast=False, trust_remote_code=True
)
# Load the base model. device_map="auto" lets accelerate place the weights on
# the available device(s), so no manual device selection / .to(device) is needed
# (the previously computed `device` variable was unused and has been removed).
model = AutoModelForCausalLM.from_pretrained(
    "./qwen/Qwen2-1___5B-Instruct/", device_map="auto", torch_dtype=torch.bfloat16
)

# 等于训练时的config参数
config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=[
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
    ],
    inference_mode=False,  # 训练模式
    r=8,  # Lora 秩
    lora_alpha=32,  # Lora alaph，具体作用参见 Lora 原理
    lora_dropout=0.1,  # Dropout 比例
)


# 加载训练好的Lora模型，将下面的checkpoint-[XXX]替换为实际的checkpoint文件名名称
lora_model = PeftModel.from_pretrained(model, model_id="./output/Qwen1.5/checkpoint-500",torch_dtype=torch.float16,config=config)
# 保存模型
model = lora_model.merge_and_unload()
model.save_pretrained(lora_path)
#回头探究一下这个是否有必要
#tokenizer.pad_token = tokenizer.eos_token
# 保存tokenizer
tokenizer.save_pretrained(lora_path)