import os
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from tqdm import tqdm
from pathlib import Path
import json
import argparse
import time

from optimum.onnxruntime import ORTModelForSeq2SeqLM

# Command-line arguments for merging PEFT/LoRA adapter weights into a base model.
parser = argparse.ArgumentParser(
    description="Merge PEFT/LoRA adapter weights into a base causal LM and export it."
)
parser.add_argument("--base_model", type=str, default="",
                    help="Path or hub id of the base model.")
parser.add_argument("--weights_path", type=str, default="",
                    help="Path to the PEFT/LoRA adapter weights.")
# Export path for the merged model.
parser.add_argument("--export_path", type=str, default="merged_model",
                    help="Directory where the merged model and tokenizer are saved.")
# input_ids-related arguments — presumably intended for the (disabled) ONNX
# export; not read anywhere in this script. TODO confirm before removing.
parser.add_argument("--sequence_length", type=int, default=512)
parser.add_argument("--batch_size", type=int, default=1)
args = parser.parse_args()


def main(
    base_model: str = args.base_model,
    weights_path: str = args.weights_path,
    export_path: str = args.export_path,
):
    """Merge LoRA adapter weights into a base model and save the result.

    Loads the base causal LM and its tokenizer, applies the PEFT/LoRA
    adapter from ``weights_path``, folds the adapter weights into the base
    weights via ``merge_and_unload``, and writes the standalone merged
    model plus tokenizer to ``export_path``.

    Args:
        base_model: Path or hub id of the base model.
        weights_path: Path to the PEFT/LoRA adapter checkpoint.
        export_path: Output directory for the merged model and tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    model = AutoModelForCausalLM.from_pretrained(
        base_model, load_in_8bit=False, device_map="auto", torch_dtype="auto"
    )
    model = PeftModel.from_pretrained(
        model,
        weights_path,
        device_map="auto",
    )
    # Fold the adapter deltas into the base weights so the exported model
    # can be loaded without the peft runtime.
    model = model.merge_and_unload()
    model.save_pretrained(export_path)
    tokenizer.save_pretrained(export_path)

    # NOTE(review): optional ONNX export, intentionally disabled.
    # ort_model = ORTModelForSeq2SeqLM.from_pretrained(export_path, export=True)
    # ort_model.save_pretrained("ONNX_model")
    # tokenizer.save_pretrained("ONNX_model")


if __name__ == "__main__":
    main()
