import os
import shutil
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoImageProcessor

# Source checkpoint (merged model) and destination directory for the fp16 copy.
# NOTE(review): paths are hardcoded for this machine — parameterize if reused.
model_path = "/root/autodl-tmp/FundusReasoner/experiments/med_pub_merge_model"
save_path = "/root/autodl-tmp/FundusReasoner/experiments/med_pub_merge_model_fp16"
# Create the target directory up front; exist_ok makes reruns idempotent.
os.makedirs(save_path, exist_ok=True)

# 1. Load the merged checkpoint in half precision and write the fp16 weights.
print(f"Loading model from {model_path} ...")
fp16_model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_path,
    torch_dtype="float16",
)
print(f"Saving model weights to {save_path} ...")
fp16_model.save_pretrained(save_path)

# 2. Mirror the tokenizer files into the fp16 directory.
print("Copying tokenizer ...")
tok = AutoTokenizer.from_pretrained(model_path)
tok.save_pretrained(save_path)

# 3. Save the image processor, if the checkpoint ships one.
try:
    processor = AutoImageProcessor.from_pretrained(model_path)
    # Announce BEFORE saving (the original printed this after the save had
    # already finished, so the message never reflected work in progress).
    print("Copying image processor ...")
    processor.save_pretrained(save_path)
except Exception as exc:
    # Best-effort: not every checkpoint ships an image-processor config,
    # so a failed load is expected and deliberately skipped.
    print("No image processor found, skipping.")

# 4. Copy over any remaining sidecar files (configs, chat templates, etc.)
#    that save_pretrained did not already recreate in the target directory.
print("\n==== Checking for extra files to copy ====")

# Weight/checkpoint artifacts are deliberately excluded: save_pretrained has
# already written the fp16 weights above, and blindly copying the source
# shards (which may use different filenames, e.g. pytorch_model.bin vs
# model.safetensors) would drag the original-precision checkpoint — and its
# stale shard index — into the fp16 directory.
_WEIGHT_SUFFIXES = (".bin", ".safetensors", ".pt", ".pth", ".ckpt", ".index.json")

src_files = set(os.listdir(model_path))
dst_files = set(os.listdir(save_path))
to_copy = {
    f for f in src_files - dst_files if not f.endswith(_WEIGHT_SUFFIXES)
}

if not to_copy:
    print("No extra files to copy.")
else:
    print(f"Extra files to copy: {to_copy}")
    for fname in to_copy:
        src_f = os.path.join(model_path, fname)
        dst_f = os.path.join(save_path, fname)
        # Only copy plain files; model directories normally have no subdirs.
        if os.path.isfile(src_f):
            # copy2 preserves timestamps/metadata, not just contents.
            shutil.copy2(src_f, dst_f)
            print(f"Copied: {fname}")
        else:
            print(f"Warning: {fname} is not a file, skipping.")

print("\nAll done! 新目录内容如下：")
print(sorted(os.listdir(save_path)))
