from unsloth import FastLanguageModel
from peft import PeftModel
import torch

# Merge a trained LoRA adapter into its base model and save the merged
# weights (plus tokenizer) as a standalone checkpoint directory.

base_model_name = "Qwen3-1__7B"

base_model_path = r"/workspace/local_model_dir/Qwen3-1.7B"
lora_adapter_path = f"./save/{base_model_name}-catgirl-lora"  # path to the trained LoRA adapter

print("加载的模型是：", base_model_path)
print("加载的LoRA是：", lora_adapter_path)

# Load the base model at full precision: quantization is disabled here
# because merging LoRA deltas into 4/8-bit weights would lose precision.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=base_model_path,
    max_seq_length=2048,
    load_in_4bit=False,
    load_in_8bit=False,  # both flags must be set explicitly, otherwise defaults apply
    full_finetuning=False,  # LoRA mode
)

print("当前模型加载的数据类型：", model.dtype)

# Attach the trained adapter on top of the base model.
# NOTE(review): device_map="cpu" places the adapter on CPU, while unsloth may
# have loaded the base model on GPU — confirm both live on the same device
# before merge_and_unload(), or a device-mismatch error can occur.
model = PeftModel.from_pretrained(model, lora_adapter_path, device_map="cpu")

# Fold the LoRA deltas into the base weights and drop the PEFT wrappers.
print("Merging LoRA with base model...")
merged_model = model.merge_and_unload()

# Persist the merged model; saving the tokenizer alongside makes the output
# directory a complete, self-contained checkpoint.
output_merged_dir = f"./save/{base_model_name}-catgirl-model"
print(f"Saving merged model to: {output_merged_dir}")
merged_model.save_pretrained(output_merged_dir)
tokenizer.save_pretrained(output_merged_dir)