from peft import LoraConfig, get_peft_model, TaskType
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments, AutoTokenizer
from datasets import load_from_disk
import torch
from peft import PeftModel


# Path to the base (pre-trained) model.
model_name_or_path = '/changzheng/PracticalTraining/lyj/lingdongai/models/model/Qwen/Qwen2___5-7B-Instruct'

# Load the base Qwen2.5-7B-Instruct model.
# torch_dtype="auto" keeps the checkpoint's stored precision instead of
# upcasting every weight to fp32, roughly halving peak memory for the
# 7B model during the merge.
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype="auto")

# Load the fine-tuned LoRA adapter on top of the base model.
peft_model = PeftModel.from_pretrained(
    model,
    "/changzheng/PracticalTraining/lyj/lingdongai/models/new_model/lora_qwen_7b_model",
)

# Fold the LoRA deltas into the base weights and strip the adapter wrappers,
# leaving a plain standalone model.
merged_model = peft_model.merge_and_unload()

# Save the merged model to a new path.
save_model_path = "/changzheng/PracticalTraining/lyj/lingdongai/models/new_model/merged_lora_qwen_model"
merged_model.save_pretrained(save_model_path)

# Fix: also save the tokenizer so the merged directory is self-contained.
# Without this, AutoTokenizer.from_pretrained(save_model_path) fails
# downstream because only the model weights/config were written.
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
tokenizer.save_pretrained(save_model_path)

print("************************")
print("基座模型加lora模型保存完毕")
print("************************")