from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import os

# Configuration - replace these with your actual paths
LORA_MODEL_PATH = "./checkpoints/archer_Qwen3-14B_rsa"  # directory containing the trained LoRA adapter
BASE_MODEL_NAME = "/home/jiashuo/codes/ForesightOptim/checkpoints/im_Qwen3-14B_rsa/merged_model"  # base model the adapter was trained on (local path or Hugging Face model id)

def merge_lora(base_model_name, lora_path, output_path):
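    """Merge a LoRA adapter into its base model and save the standalone result to output_path."""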
    # Load base model
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_name,
        return_dict=True,
        torch_dtype=torch.float16,
        device_map="auto"
    )
    
    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(base_model_name)
    
    # Load LoRA adapter
    lora_model = PeftModel.from_pretrained(base_model, lora_path)
    
    # Merge weights
    merged_model = lora_model.merge_and_unload()
    
    # Save merged model
    merged_model.save_pretrained(output_path)
    tokenizer.save_pretrained(output_path)
    print(f"Merged model saved to {output_path}")

if __name__ == "__main__":
    merge_lora(BASE_MODEL_NAME, LORA_MODEL_PATH, os.path.join(LORA_MODEL_PATH, "merged_model"))