import argparse

from peft import AutoPeftModelForCausalLM, PeftModel
from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
                          GenerationConfig, TextIteratorStreamer)

def build_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the LoRA-merge script.

    Flags (both optional, defaults point at the local training output):
      --path_to_adapter       directory holding the fine-tuned PEFT adapter
      --new_model_directory   directory to write the merged model into
    """
    parser = argparse.ArgumentParser(description='Script to merge Llama-3-hf with Llama-3-Chinese or Llama-3-Chinese-Instruct LoRA weights')
    parser.add_argument('--path_to_adapter', default='G:/LLM/lover_llama/script/training/output', type=str, help="微调后模型路径")
    parser.add_argument('--new_model_directory', default='G:/LLM/lover_llama/script/training/merged_model', type=str, help="保存合并后模型路径")
    return parser


def merge_and_save(path_to_adapter: str, new_model_directory: str) -> None:
    """Merge LoRA adapter weights into the base model and save the result.

    Loads the base model together with the PEFT adapter found at
    *path_to_adapter*, folds the LoRA deltas into the base weights, and
    writes the merged model plus its tokenizer to *new_model_directory*,
    so that directory is self-contained and loadable on its own.
    """
    # 1. Model merging: AutoPeftModelForCausalLM resolves the base model
    # from the adapter config, then merge_and_unload() folds the LoRA
    # weights in and returns a plain (non-PEFT) model.
    model = AutoPeftModelForCausalLM.from_pretrained(
        path_to_adapter,
        device_map='auto',
        trust_remote_code=True).eval()

    merged_model = model.merge_and_unload()
    # max_shard_size and safe_serialization are optional: they control
    # checkpoint sharding and saving in the safetensors format, respectively.
    merged_model.save_pretrained(new_model_directory, max_shard_size='2048MB', safe_serialization=True)

    # 2. Tokenizer: saved from the adapter directory so any added/changed
    # tokens from fine-tuning travel with the merged model.
    tokenizer = AutoTokenizer.from_pretrained(
        path_to_adapter,
        trust_remote_code=True)
    tokenizer.save_pretrained(new_model_directory)


def main() -> None:
    """Parse command-line arguments and run the merge."""
    args = build_parser().parse_args()
    merge_and_save(args.path_to_adapter, args.new_model_directory)


# Guarded entry point: importing this module no longer triggers the
# (expensive) model load/merge as a side effect.
if __name__ == '__main__':
    main()