import argparse
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# ---------------------------------------------------------------------------
# CLI: merge a LoRA adapter into its base model and save the merged weights
# (plus the tokenizer) into a single self-contained output directory.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description="Merge LoRA model with base model.")

parser.add_argument("--base-model-path",
                    type=str,
                    help="Path to the base model directory.",
                    default="/data/llm/models/DeepSeek-R1-Distill-Qwen-1.5B")

parser.add_argument("--lora-model-path",
                    type=str,
                    help="Path to the LoRA model directory.",
                    default="saves/DeepSeek-R1-1.5B-Distill/lora/train_2025-06-16-03-35-11")

# Output directory is now configurable; the default keeps the previous
# hard-coded behavior ("merged_model") so existing invocations are unchanged.
parser.add_argument("--output-path",
                    type=str,
                    help="Directory to save the merged model and tokenizer.",
                    default="merged_model")

args = parser.parse_args()

# Load the base model in fp16; device_map="auto" lets accelerate place the
# weights on available devices automatically.
base_model = AutoModelForCausalLM.from_pretrained(
    args.base_model_path,
    torch_dtype=torch.float16,
    device_map="auto"
)
# Tokenizer comes from the base model so vocab/special tokens stay consistent.
tokenizer = AutoTokenizer.from_pretrained(args.base_model_path)

# Load the LoRA adapter on top of the base model.
print(f'{args.lora_model_path}')

lora_model = PeftModel.from_pretrained(
    base_model,
    args.lora_model_path,
    torch_dtype=torch.float16
)

# Fold the adapter weights into the base weights and strip the PEFT wrappers,
# leaving a plain transformers model.
merged_model = lora_model.merge_and_unload()

# Save the merged model; also save the tokenizer so the output directory can
# be loaded standalone with from_pretrained().
merged_model.save_pretrained(args.output_path)
tokenizer.save_pretrained(args.output_path)