from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel
import torch

# 1. Configure paths.
# NOTE: raw strings (r"...") are required for Windows paths — the original
# plain strings contained invalid escape sequences (\h, \p, \Q) that only
# worked by accident and raise SyntaxWarning on Python 3.12+.
base_model_name_or_path = r"E:\hiswkspace\pediatrics_llm_qa\Qwen2.5-1.5B-Instruct"  # base model path or Hugging Face model name
lora_model_path = r"E:\hiswkspace\pediatrics_llm_qa\pediatrics_llm_qa2.5_1.5B"  # path to the trained LoRA adapter
output_dir = "./merged_qwen_model"  # where the merged model is saved

# 2. Load the base model and tokenizer.
tokenizer = AutoTokenizer.from_pretrained(base_model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    base_model_name_or_path,
    device_map="auto",  # automatically place weights on available devices (CPU/GPU)
    torch_dtype=torch.float16,  # float16 halves memory footprint vs float32
    trust_remote_code=True
)

# 3. Load the LoRA adapter and merge it.
model = PeftModel.from_pretrained(model, lora_model_path)
# Key step: fold the LoRA deltas into the base weights and drop the adapter
# wrappers, yielding a plain standalone model.
merged_model = model.merge_and_unload()

# 4. Save the merged model (safetensors format) and the tokenizer.
merged_model.save_pretrained(output_dir, safe_serialization=True)
tokenizer.save_pretrained(output_dir)

print(f"合并完成！模型已保存至：{output_dir}")
