Upload convert_vision_model.py with huggingface_hub
convert_vision_model.py  (+13 -11)
@@ -35,11 +35,10 @@ import torch
 from transformers import AutoModel, AutoTokenizer, AutoProcessor
 from huggingface_hub import HfApi, hf_hub_download, login
 
-# Login to Hugging Face Hub
-hf_token = os.environ.get("HF_TOKEN")
-if hf_token:
-    login(token=hf_token)
-    print("Logged in to Hugging Face Hub")
+# Get HF_TOKEN for private repo access
+HF_TOKEN = os.environ.get("HF_TOKEN")
+if HF_TOKEN:
+    print(f"HF_TOKEN found (length: {len(HF_TOKEN)})")
 import subprocess
 import shutil
 import glob
@@ -86,6 +85,7 @@ if IS_LORA:
         model_name=MODEL_PATH,
         dtype=torch.float16,
         load_in_4bit=False,
+        token=HF_TOKEN,
     )
     print(" Loaded with unsloth FastModel")
 
@@ -103,8 +103,8 @@ if IS_LORA:
     from safetensors.torch import load_file
 
     # Download adapter weights
-    adapter_weights_path = hf_hub_download(MODEL_PATH, "adapter_model.safetensors")
-    adapter_config_path = hf_hub_download(MODEL_PATH, "adapter_config.json")
+    adapter_weights_path = hf_hub_download(MODEL_PATH, "adapter_model.safetensors", token=HF_TOKEN)
+    adapter_config_path = hf_hub_download(MODEL_PATH, "adapter_config.json", token=HF_TOKEN)
 
     with open(adapter_config_path) as f:
         adapter_config = json.load(f)
@@ -129,6 +129,7 @@ if IS_LORA:
                 torch_dtype=torch.float16,
                 device_map="cpu",  # Load on CPU first
                 trust_remote_code=True,
+                token=HF_TOKEN,
             )
             print(f" Base model loaded with {class_name}")
             break
@@ -175,7 +176,7 @@ if IS_LORA:
     processor_saved = False
     for source in [MODEL_PATH, BASE_MODEL]:
         try:
-            processor = AutoProcessor.from_pretrained(source, trust_remote_code=True)
+            processor = AutoProcessor.from_pretrained(source, trust_remote_code=True, token=HF_TOKEN)
             processor.save_pretrained(merged_dir)
             print(f" Processor saved from {source}")
             processor_saved = True
@@ -186,7 +187,7 @@ if IS_LORA:
     if not processor_saved:
         for source in [MODEL_PATH, BASE_MODEL]:
             try:
-                tokenizer = AutoTokenizer.from_pretrained(source, trust_remote_code=True)
+                tokenizer = AutoTokenizer.from_pretrained(source, trust_remote_code=True, token=HF_TOKEN)
                 tokenizer.save_pretrained(merged_dir)
                 print(f" Tokenizer saved from {source}")
                 break
@@ -195,7 +196,7 @@ if IS_LORA:
 
     # Copy chat template if exists in adapter
    try:
-        chat_template_path = hf_hub_download(MODEL_PATH, "chat_template.jinja")
+        chat_template_path = hf_hub_download(MODEL_PATH, "chat_template.jinja", token=HF_TOKEN)
         shutil.copy(chat_template_path, f"{merged_dir}/chat_template.jinja")
         print(" Copied chat_template.jinja from adapter")
     except:
@@ -208,6 +209,7 @@ else:
         repo_id=MODEL_PATH,
         local_dir=merged_dir,
         local_dir_use_symlinks=False,
+        token=HF_TOKEN,
     )
     print(f" Model downloaded to {merged_dir}")
 
@@ -318,7 +320,7 @@ for quant_type, desc in quant_formats:
 
 # Step 6: Upload to Hub
 print("\n[6/7] Uploading to Hugging Face Hub...")
-api = HfApi()
+api = HfApi(token=HF_TOKEN)
 
 # Upload all GGUF files
 for f in os.listdir(gguf_output_dir):
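
Taken together, the change drops the old login-at-import block in favor of explicit token threading: HF_TOKEN is read once from the environment and passed to every call that touches the Hub (hf_hub_download, the base-model from_pretrained, the full-model download in the else branch, and HfApi). A minimal sketch of the pattern, assuming a hypothetical private repo id; the upload_file call is illustrative and not part of the original script:

import os

from huggingface_hub import HfApi, hf_hub_download

# Read the token once. None falls back to locally cached credentials,
# so the same code still works for public repos without a token.
HF_TOKEN = os.environ.get("HF_TOKEN")

MODEL_PATH = "your-org/private-adapter"  # hypothetical repo id

# Pass the token explicitly to each Hub call instead of relying on a
# global login(), so the script works where no cached login exists
# (CI runners, fresh containers).
config_path = hf_hub_download(MODEL_PATH, "adapter_config.json", token=HF_TOKEN)

api = HfApi(token=HF_TOKEN)
api.upload_file(
    path_or_fileobj=config_path,
    path_in_repo="adapter_config.json",
    repo_id=MODEL_PATH,
)

To exercise the private-repo path, export the token before running, e.g. HF_TOKEN=<your token> python convert_vision_model.py.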