| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
|
|
| |
| |
|
|
| |
| |
|
|
| |
|
|
| import torch |
| from transformers import AutoModelForCausalLM, AutoTokenizer |
| from peft import PeftModel, PeftConfig |
| import os |
|
|
def merge_and_save_model(
    base_model_name: str = "LiquidAI/LFM2-2.6B",
    adapter_path: str = "./lfm_minimal_output/final_model",
    output_path: str = "./merged_counselor_minimal_2b",
):
    """
    Merge LoRA adapter weights into the base model and save the result.

    Loads the base model in fp16, applies the LoRA adapter, folds the
    adapter weights into the base weights (``merge_and_unload``), and
    writes the standalone merged model plus a tokenizer to ``output_path``.

    Args:
        base_model_name: Hub id or local path of the base model.
        adapter_path: Directory containing the trained LoRA adapter.
        output_path: Directory to write the merged model and tokenizer to.

    Returns:
        Tuple of (merged model, tokenizer).
    """
    print("Loading base model...")
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_name,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True,
    )

    print("Loading LoRA adapter...")
    model = PeftModel.from_pretrained(
        base_model,
        adapter_path,
        torch_dtype=torch.float16,
    )

    print("Merging weights...")
    # Folds LoRA deltas into the base weights and returns a plain
    # (non-PEFT) model, so the saved checkpoint needs no peft at load time.
    model = model.merge_and_unload()

    print(f"Saving merged model to {output_path}...")
    model.save_pretrained(output_path)

    # PEFT's save_pretrained stores only adapter weights/config; tokenizer
    # files are present in adapter_path only if the training script saved
    # them explicitly. Fall back to the base model's tokenizer otherwise.
    try:
        tokenizer = AutoTokenizer.from_pretrained(adapter_path)
    except (OSError, ValueError):
        print("Tokenizer not found in adapter path; using base model tokenizer.")
        tokenizer = AutoTokenizer.from_pretrained(
            base_model_name, trust_remote_code=True
        )
    tokenizer.save_pretrained(output_path)

    print("✅ Model merged and saved successfully!")
    return model, tokenizer
|
|
| |
| if __name__ == "__main__": |
| merge_and_save_model() |
|
|