import argparse

import torch
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM

# Default locations; each can be overridden from the command line.
DEFAULT_BASE_MODEL = "/root/autodl-tmp/data/models/models/Qwen/Qwen3-8B"
DEFAULT_LORA_PATH = "/root/autodl-tmp/data/save_modles/checkpoint-43755"
DEFAULT_OUTPUT_PATH = "/root/autodl-tmp/data/merge_models"


def merge_lora(base_model_name: str, lora_model_path: str, merged_model_path: str) -> None:
    """Merge a LoRA adapter into its base model and save the result.

    Loads the base causal LM and its tokenizer, attaches the LoRA
    adapter, folds the adapter weights into the base weights, and
    writes the standalone merged model plus tokenizer to
    ``merged_model_path``.

    Args:
        base_model_name: Path (or hub id) of the base model.
        lora_model_path: Path to the trained LoRA adapter checkpoint.
        merged_model_path: Directory to save the merged model into.
    """
    # Load the base model and tokenizer.
    tokenizer = AutoTokenizer.from_pretrained(base_model_name)
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_name,
        torch_dtype=torch.bfloat16,  # bf16 halves memory vs fp32
        device_map="auto",           # spread layers across available devices
    )

    # Attach the LoRA adapter on top of the base weights.
    model = PeftModel.from_pretrained(base_model, lora_model_path)

    # Fold the adapter deltas into the base weights and drop the PEFT wrappers,
    # leaving a plain transformers model.
    merged_model = model.merge_and_unload()

    # Persist the merged model together with its tokenizer.
    merged_model.save_pretrained(merged_model_path)
    tokenizer.save_pretrained(merged_model_path)

    print(f"合并模型保存到: {merged_model_path}")


def main() -> None:
    """Parse command-line arguments and run the merge."""
    parser = argparse.ArgumentParser(
        description="Merge a LoRA adapter into its base model and save the result."
    )
    parser.add_argument(
        "--base-model", default=DEFAULT_BASE_MODEL,
        help="path or hub id of the base model",
    )
    parser.add_argument(
        "--lora-path", default=DEFAULT_LORA_PATH,
        help="path to the LoRA adapter checkpoint",
    )
    parser.add_argument(
        "--output-path", default=DEFAULT_OUTPUT_PATH,
        help="directory to save the merged model into",
    )
    args = parser.parse_args()
    merge_lora(args.base_model, args.lora_path, args.output_path)


if __name__ == "__main__":
    main()