NemoVonNirgend committed on
Commit 7929f62 · verified · 1 Parent(s): 3ecc6ae

Upload convert_vision_model.py with huggingface_hub

Files changed (1): convert_vision_model.py (+368, -0)
convert_vision_model.py (ADDED):

# /// script
# dependencies = [
#     "transformers>=5.0.0rc1",
#     "peft>=0.14.0",
#     "torch>=2.0.0",
#     "accelerate>=0.24.0",
#     "huggingface_hub>=0.20.0",
#     "sentencepiece>=0.1.99",
#     "protobuf>=3.20.0",
#     "numpy",
#     "gguf",
#     "safetensors",
#     "pillow",
#     "unsloth @ git+https://github.com/unslothai/unsloth.git",
#     "xformers",
# ]
# ///

"""
GGUF Conversion Script for Vision/Multimodal Models

Creates both model.gguf and mmproj-model.gguf files for vision models.

Environment variables:
- MODEL_PATH: The model to convert (full model or LoRA adapter)
- BASE_MODEL: Base model for LoRA merge (optional, only for LoRA adapters)
- OUTPUT_REPO: Where to upload GGUF files
- MODEL_NAME: Name prefix for output files
- IS_LORA: "true" if this is a LoRA adapter, "false" for full model
"""

import os
import torch
from transformers import AutoTokenizer, AutoProcessor
from huggingface_hub import HfApi, hf_hub_download
import subprocess
import shutil
import glob

print("=" * 60)
print("GGUF Conversion Script for Vision/Multimodal Models")
print("=" * 60)

# Configuration
MODEL_PATH = os.environ.get("MODEL_PATH")
BASE_MODEL = os.environ.get("BASE_MODEL", "")
OUTPUT_REPO = os.environ.get("OUTPUT_REPO")
MODEL_NAME = os.environ.get("MODEL_NAME")
IS_LORA = os.environ.get("IS_LORA", "false").lower() == "true"

print("\nConfiguration:")
print(f"  Model path: {MODEL_PATH}")
print(f"  Base model: {BASE_MODEL}")
print(f"  Output repo: {OUTPUT_REPO}")
print(f"  Model name: {MODEL_NAME}")
print(f"  Is LoRA: {IS_LORA}")

# Step 1: Load model (with optional LoRA merge)
print("\n[1/7] Loading model...")

merged_dir = "/tmp/merged_model"
os.makedirs(merged_dir, exist_ok=True)

if IS_LORA:
    import json

    print("  Loading model with LoRA adapter...")
    print(f"  Base model: {BASE_MODEL}")
    print(f"  Adapter: {MODEL_PATH}")

    model = None
    tokenizer = None

    # Try unsloth first (best for unsloth-trained adapters)
    try:
        print("  Trying unsloth FastModel...")
        from unsloth import FastModel
        model, tokenizer = FastModel.from_pretrained(
            model_name=MODEL_PATH,
            dtype=torch.float16,
            load_in_4bit=False,
        )
        print("  Loaded with unsloth FastModel")

        # Merge LoRA weights into the base and save as a full fp16 checkpoint
        print("  Merging LoRA weights...")
        model.save_pretrained_merged(merged_dir, tokenizer, save_method="merged_16bit")
        print(f"  Merged model saved to {merged_dir}")
        model = None  # Free memory
    except Exception as e:
        print(f"  Unsloth failed: {e}")
        print("  Falling back to manual LoRA weight application...")

        # Manual approach: load base model, then manually apply LoRA weights
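        # (An alternative would be peft's PeftModel.from_pretrained(base, adapter)
        # followed by merge_and_unload(); the manual merge below is used instead,
        # so the script does not depend on the PEFT wrapper loading cleanly for
        # this multimodal architecture.)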
        from safetensors.torch import load_file

        # Download adapter weights and config
        adapter_weights_path = hf_hub_download(MODEL_PATH, "adapter_model.safetensors")
        adapter_config_path = hf_hub_download(MODEL_PATH, "adapter_config.json")

        with open(adapter_config_path) as f:
            adapter_config = json.load(f)

        # Load the base model with an architecture-specific class, falling
        # back through the candidates until one succeeds
        import importlib

        model_classes = [
            ("Glm4vForConditionalGeneration", "transformers"),
            ("Mistral3ForConditionalGeneration", "transformers"),
            ("Gemma3ForConditionalGeneration", "transformers"),
            ("AutoModelForVision2Seq", "transformers"),
        ]

        base_model = None
        for class_name, module in model_classes:
            try:
                mod = importlib.import_module(module)
                model_class = getattr(mod, class_name)
                print(f"    Trying {class_name}...")
                base_model = model_class.from_pretrained(
                    BASE_MODEL,
                    dtype=torch.float16,  # transformers v5 renamed torch_dtype to dtype
                    device_map="cpu",  # Load on CPU first
                    trust_remote_code=True,
                )
                print(f"    Base model loaded with {class_name}")
                break
            except Exception as e2:
                print(f"    {class_name} failed: {e2}")
                continue

        if base_model is None:
            raise ValueError(f"Could not load base model {BASE_MODEL}")

        # Load adapter weights
        print("  Loading adapter weights...")
        adapter_weights = load_file(adapter_weights_path)

        # Apply LoRA weights manually
        print("  Applying LoRA weights to base model...")
        lora_alpha = adapter_config.get("lora_alpha", 16)
        lora_r = adapter_config.get("r", 8)
        scaling = lora_alpha / lora_r
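
        # Standard LoRA merge math: for a base weight W of shape (out, in),
        # lora_A is (r, in) and lora_B is (out, r), so scaling * (lora_B @ lora_A)
        # has the same shape as W. With the fallback defaults above
        # (lora_alpha=16, r=8), the scaling factor would be 16 / 8 = 2.0.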

        state_dict = base_model.state_dict()
        for key, value in adapter_weights.items():
            # LoRA tensors are named like: <module>.lora_A.weight / <module>.lora_B.weight
            if "lora_A" in key:
                base_key = key.replace(".lora_A.weight", ".weight").replace("base_model.model.", "")
                lora_b_key = key.replace("lora_A", "lora_B")
                if lora_b_key in adapter_weights and base_key in state_dict:
                    lora_a = value
                    lora_b = adapter_weights[lora_b_key]
                    # Merge: W = W + scaling * B @ A
                    delta = scaling * (lora_b @ lora_a)
                    state_dict[base_key] = state_dict[base_key] + delta.to(state_dict[base_key].dtype)

        base_model.load_state_dict(state_dict)
        print("  LoRA weights applied")

        # Save merged model
        base_model.save_pretrained(merged_dir, safe_serialization=True)
        del base_model

    # Save the tokenizer/processor, preferring the adapter (it carries the
    # chat template), then falling back to the base model
    print("  Saving processor/tokenizer...")
    processor_saved = False
    for source in [MODEL_PATH, BASE_MODEL]:
        try:
            processor = AutoProcessor.from_pretrained(source, trust_remote_code=True)
            processor.save_pretrained(merged_dir)
            print(f"  Processor saved from {source}")
            processor_saved = True
            break
        except Exception as e:
            print(f"  Could not load processor from {source}: {e}")

    if not processor_saved:
        for source in [MODEL_PATH, BASE_MODEL]:
            try:
                tokenizer = AutoTokenizer.from_pretrained(source, trust_remote_code=True)
                tokenizer.save_pretrained(merged_dir)
                print(f"  Tokenizer saved from {source}")
                break
            except Exception as e:
                print(f"  Could not load tokenizer from {source}: {e}")

    # Copy the chat template if the adapter ships one
    try:
        chat_template_path = hf_hub_download(MODEL_PATH, "chat_template.jinja")
        shutil.copy(chat_template_path, f"{merged_dir}/chat_template.jinja")
        print("  Copied chat_template.jinja from adapter")
    except Exception:
        pass
else:
    print(f"  Loading full model: {MODEL_PATH}")
    # For full models, download directly to merged_dir
    from huggingface_hub import snapshot_download
    snapshot_download(
        repo_id=MODEL_PATH,
        local_dir=merged_dir,
        local_dir_use_symlinks=False,
    )
    print(f"  Model downloaded to {merged_dir}")

torch.cuda.empty_cache()
print("  Model prepared")

# List contents of the merged dir (first 15 entries)
print(f"\n  Contents of {merged_dir}:")
for f in sorted(os.listdir(merged_dir))[:15]:
    print(f"    {f}")

# Step 2: Install build tools and clone llama.cpp
print("\n[2/7] Setting up llama.cpp...")
subprocess.run(["apt-get", "update", "-qq"], check=True, capture_output=True)
subprocess.run(["apt-get", "install", "-y", "-qq", "build-essential", "cmake"], check=True, capture_output=True)
print("  Build tools installed")

if os.path.exists("/tmp/llama.cpp"):
    shutil.rmtree("/tmp/llama.cpp")
subprocess.run(
    ["git", "clone", "--depth", "1", "https://github.com/ggml-org/llama.cpp.git", "/tmp/llama.cpp"],
    check=True, capture_output=True,
)
print("  llama.cpp cloned")

subprocess.run(["pip", "install", "-q", "-r", "/tmp/llama.cpp/requirements.txt"], check=True, capture_output=True)
print("  Python dependencies installed")

# Step 3: Convert to GGUF with mmproj (FP16)
print("\n[3/7] Converting to GGUF format with multimodal projector...")
gguf_output_dir = "/tmp/gguf_output"
os.makedirs(gguf_output_dir, exist_ok=True)

convert_script = "/tmp/llama.cpp/convert_hf_to_gguf.py"
gguf_fp16 = f"{gguf_output_dir}/{MODEL_NAME}-f16.gguf"

# Convert with --mmproj to generate the vision projector
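# Note: depending on the llama.cpp revision, --mmproj may be a boolean flag
# that emits only the projector rather than an option taking a directory; if
# this invocation is rejected, the retry below converts without it.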
print("  Running conversion with --mmproj...")
result = subprocess.run(
    ["python", convert_script, merged_dir, "--outfile", gguf_fp16, "--outtype", "f16", "--mmproj", merged_dir],
    capture_output=True, text=True,
)
print(result.stdout)
if result.stderr:
    print("STDERR:", result.stderr)

if result.returncode != 0:
    print("  Warning: mmproj conversion may have failed, retrying without it...")
    result = subprocess.run(
        ["python", convert_script, merged_dir, "--outfile", gguf_fp16, "--outtype", "f16"],
        check=True, capture_output=True, text=True,
    )
    print(result.stdout)

print("  FP16 GGUF created")

# Find the mmproj file
mmproj_files = glob.glob(f"{gguf_output_dir}/mmproj*.gguf")
if not mmproj_files:
    # The convert script may write it to the current directory instead
    mmproj_files = glob.glob("mmproj*.gguf")
    if mmproj_files:
        # Move to the output dir
        for f in mmproj_files:
            shutil.move(f, gguf_output_dir)
        mmproj_files = glob.glob(f"{gguf_output_dir}/mmproj*.gguf")

print("\n  Files in output dir:")
for f in os.listdir(gguf_output_dir):
    size_gb = os.path.getsize(f"{gguf_output_dir}/{f}") / (1024**3)
    print(f"    {f}: {size_gb:.2f} GB")

# Step 4: Build quantize tool
print("\n[4/7] Building quantize tool...")
os.makedirs("/tmp/llama.cpp/build", exist_ok=True)

subprocess.run(
    ["cmake", "-B", "/tmp/llama.cpp/build", "-S", "/tmp/llama.cpp", "-DGGML_CUDA=OFF"],
    check=True, capture_output=True, text=True,
)
subprocess.run(
    ["cmake", "--build", "/tmp/llama.cpp/build", "--target", "llama-quantize", "-j", "4"],
    check=True, capture_output=True, text=True,
)
print("  Quantize tool built")

quantize_bin = "/tmp/llama.cpp/build/bin/llama-quantize"

# Step 5: Create quantized versions
print("\n[5/7] Creating quantized versions...")
quant_formats = [
    ("Q4_K_M", "4-bit medium"),
    ("Q5_K_M", "5-bit medium"),
    ("Q8_0", "8-bit"),
]
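
# Rough guidance (not benchmarked here): Q4_K_M is usually the best
# size/quality trade-off for local inference, Q5_K_M retains a little more
# quality at a larger size, and Q8_0 is close to lossless relative to the
# f16 source.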

quantized_files = []
for quant_type, desc in quant_formats:
    print(f"  Creating {quant_type} ({desc})...")
    quant_file = f"{gguf_output_dir}/{MODEL_NAME}-{quant_type.lower()}.gguf"
    result = subprocess.run([quantize_bin, gguf_fp16, quant_file, quant_type], capture_output=True, text=True)
    if result.returncode == 0:
        size_gb = os.path.getsize(quant_file) / (1024**3)
        print(f"    {quant_type}: {size_gb:.2f} GB")
        quantized_files.append((quant_file, quant_type))
    else:
        print(f"    {quant_type}: FAILED - {result.stderr}")

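# The mmproj file is left at f16 (only the language model GGUF is quantized
# above); llama.cpp loads the projector separately, and it is commonly
# shipped unquantized.
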
# Step 6: Upload to Hub
print("\n[6/7] Uploading to Hugging Face Hub...")
api = HfApi()
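
# Assumption: OUTPUT_REPO may not exist yet. Creating it up front (a no-op
# when it already exists) avoids upload_file failing on a missing repo; this
# assumes the ambient HF token has write access.
api.create_repo(repo_id=OUTPUT_REPO, exist_ok=True)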

# Upload all GGUF files
for f in os.listdir(gguf_output_dir):
    if f.endswith(".gguf"):
        filepath = f"{gguf_output_dir}/{f}"
        print(f"  Uploading {f}...")
        api.upload_file(
            path_or_fileobj=filepath,
            path_in_repo=f,
            repo_id=OUTPUT_REPO,
        )

# Step 7: Create model card entry
print("\n[7/7] Creating model info...")
info_content = f"""
## {MODEL_NAME}

Vision/Multimodal model converted to GGUF.

**Source:** {MODEL_PATH}
**Base:** {BASE_MODEL if BASE_MODEL else "N/A"}

### Files
- `{MODEL_NAME}-f16.gguf` - Full precision
- `{MODEL_NAME}-q8_0.gguf` - 8-bit quantized
- `{MODEL_NAME}-q5_k_m.gguf` - 5-bit quantized
- `{MODEL_NAME}-q4_k_m.gguf` - 4-bit quantized (recommended)
- `mmproj-*.gguf` - Vision projector (required for image input)

### Usage with llama.cpp
```bash
llama-mtmd-cli -m {MODEL_NAME}-q4_k_m.gguf --mmproj mmproj-{MODEL_NAME}-f16.gguf --image your_image.jpg
```
"""

# Append to the existing README if there is one
try:
    existing = api.hf_hub_download(OUTPUT_REPO, "README.md")
    with open(existing) as f:
        content = f.read()
    content += "\n" + info_content
except Exception:
    content = f"# {OUTPUT_REPO.split('/')[-1]}\n\nGGUF model collection.\n" + info_content

api.upload_file(
    path_or_fileobj=content.encode(),
    path_in_repo="README.md",
    repo_id=OUTPUT_REPO,
)

print("\n" + "=" * 60)
print(f"CONVERSION COMPLETE: {MODEL_NAME}")
print(f"Repository: https://huggingface.co/{OUTPUT_REPO}")
print("=" * 60)