# -*-coding:utf-8 -*-

import os
import argparse
import torch
from transformers import LlamaTokenizer, AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Merge LoRA fine-tuned weights into a base causal-LM and save the result.
#
# Loads the base model, attaches the LoRA adapter from --finetune_dir,
# folds the adapter deltas into the base weights, and writes a standalone
# model (plus its tokenizer) to --output_path.

# Command-line arguments. --finetune_dir / --output_path are new but
# default to the previously hard-coded values, so existing invocations
# behave identically.
parser = argparse.ArgumentParser(description="Merge LoRA adapter weights into a specified base model.")
parser.add_argument("--model_path", type=str, default="bigcode/large-model", help="Path to the base model.")
parser.add_argument("--finetune_dir", type=str, default="../result/checkpoints/lora_finetune/",
                    help="Directory containing the LoRA adapter checkpoint.")
parser.add_argument("--output_path", type=str, default="../result/model_finetune",
                    help="Directory to write the merged model and tokenizer.")
args = parser.parse_args()

base_model = args.model_path
finetune_dir = args.finetune_dir
output_path = args.output_path
os.makedirs(output_path, exist_ok=True)

# BUG FIX: the original used LlamaTokenizer unconditionally, which breaks for
# non-Llama base models (including the default bigcode checkpoint above).
# AutoTokenizer resolves the correct tokenizer class from the model config.
tokenizer = AutoTokenizer.from_pretrained(base_model)

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    # load_in_8bit=True,      # intentionally disabled: merging requires full-precision weights
    torch_dtype=torch.float16,  # half precision to reduce memory during the merge
    device_map="auto",          # shard layers across available devices automatically
)

# Attach the adapter, merge its deltas into the base weights, and drop the
# PEFT wrapper so the saved model is a plain standalone checkpoint.
model = PeftModel.from_pretrained(model, finetune_dir)
model = model.merge_and_unload()
model.save_pretrained(output_path)
tokenizer.save_pretrained(output_path)
print("finished!!!")