import os
from peft import PeftModel, LoraConfig, TaskType
from transformers import AutoTokenizer, AutoModelForCausalLM,Qwen2_5_VLForConditionalGeneration
import torch
# Merge a fine-tuned LoRA adapter into the Qwen2.5-VL base model and save the
# standalone merged checkpoint (loadable later without peft).

# Paths: LoRA adapter output dir and the base pretrained checkpoint.
peft_model_id = "./output/Qwen2.5-VL-7B/"
base_model_path = "/root/autodl-tmp/Qwen/Qwen2.5-VL-7B-Instruct/"

# Load the tokenizer from the base checkpoint.
tokenizer = AutoTokenizer.from_pretrained(base_model_path, use_fast=True)

# Pin the whole model to the GPU given by LOCAL_RANK; falls back to device 0
# when LOCAL_RANK is unset or empty (e.g. running outside torchrun).
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    base_model_path, device_map=device_map, torch_dtype=torch.bfloat16
)

# Attach the fine-tuned LoRA adapter to the base model.
val_peft_model = PeftModel.from_pretrained(model=model, model_id=peft_model_id)

# Merge LoRA weights into the base weights. merge_and_unload() RETURNS the
# merged base model — capture it instead of relying on in-place mutation of
# `model` (the original saved the pre-merge variable, which only works by
# accident of peft internals).
merged_model = val_peft_model.merge_and_unload()

# Persist the merged model and tokenizer together.
merged_model_path = "./merged_model"
merged_model.save_pretrained(merged_model_path)
tokenizer.save_pretrained(merged_model_path)

print(f"合并后的模型已保存至 {merged_model_path}")