davanstrien HF Staff commited on
Commit
a59d9c8
·
verified ·
1 Parent(s): 39e8718

Upload glm-ocr.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. glm-ocr.py +532 -0
glm-ocr.py ADDED
@@ -0,0 +1,532 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.11"
3
+ # dependencies = [
4
+ # "datasets>=3.1.0",
5
+ # "pyarrow>=17.0.0,<18.0.0",
6
+ # "huggingface-hub",
7
+ # "pillow",
8
+ # "vllm",
9
+ # "tqdm",
10
+ # "toolz",
11
+ # "torch",
12
+ # ]
13
+ #
14
+ # [[tool.uv.index]]
15
+ # url = "https://wheels.vllm.ai/nightly"
16
+ #
17
+ # [tool.uv]
18
+ # prerelease = "allow"
19
+ # override-dependencies = ["transformers @ git+https://github.com/huggingface/transformers.git"]
20
+ # ///
21
+
22
+ """
23
+ Convert document images to markdown using GLM-OCR with vLLM.
24
+
25
+ GLM-OCR is a compact 0.9B parameter OCR model achieving 94.62% on OmniDocBench V1.5.
26
+ Uses CogViT visual encoder with GLM-0.5B language decoder and Multi-Token Prediction
27
+ (MTP) loss for fast, accurate document parsing.
28
+
29
+ NOTE: Requires vLLM nightly wheels and transformers from git for GLM-OCR support.
30
+ First run may take a few minutes to download and install dependencies.
31
+
32
+ Features:
33
+ - 0.9B parameters (ultra-compact)
34
+ - 94.62% on OmniDocBench V1.5 (SOTA for sub-1B models)
35
+ - Text recognition with markdown output
36
+ - LaTeX formula recognition
37
+ - Table extraction (HTML format)
38
+ - Multilingual: zh, en, fr, es, ru, de, ja, ko
39
+ - MIT licensed
40
+
41
+ Model: zai-org/GLM-OCR
42
+ vLLM: Requires vLLM nightly build + transformers from git
43
+ Performance: 94.62% on OmniDocBench V1.5
44
+ """
45
+
46
+ import argparse
47
+ import base64
48
+ import io
49
+ import json
50
+ import logging
51
+ import os
52
+ import sys
53
+ from datetime import datetime
54
+ from typing import Any, Dict, List, Union
55
+
56
+ import torch
57
+ from datasets import load_dataset
58
+ from huggingface_hub import DatasetCard, login
59
+ from PIL import Image
60
+ from toolz import partition_all
61
+ from tqdm.auto import tqdm
62
+ from vllm import LLM, SamplingParams
63
+
64
# Root logger at INFO so progress/status messages are visible when run as a script.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Hugging Face Hub model ID used for all inference in this script.
MODEL = "zai-org/GLM-OCR"

# Task prompts as specified by the model
# Each key is a CLI-selectable task mode; the value is the literal prompt text
# the model expects, sent after the image in the chat message (see
# make_ocr_message). Unknown task keys fall back to "ocr" at lookup time.
TASK_PROMPTS = {
    "ocr": "Text Recognition:",
    "formula": "Formula Recognition:",
    "table": "Table Recognition:",
}
75
+
76
+
77
def check_cuda_availability():
    """Abort the process unless a CUDA-capable GPU is available.

    On success, logs the name of GPU 0 and returns. Otherwise logs an
    error and terminates with exit status 1 — this script cannot run
    on CPU.
    """
    if torch.cuda.is_available():
        logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
        return
    logger.error("CUDA is not available. This script requires a GPU.")
    logger.error("Please run on a machine with a CUDA-capable GPU.")
    sys.exit(1)
85
+
86
+
87
def make_ocr_message(
    image: Union[Image.Image, Dict[str, Any], str],
    task: str = "ocr",
) -> List[Dict]:
    """
    Create chat message for OCR processing.

    GLM-OCR uses a chat format with an image and a task prompt prefix.
    Supported tasks: ocr, formula, table (unknown tasks fall back to ocr).

    Args:
        image: A PIL image, an HF datasets image dict (with ``bytes`` and/or
            ``path`` keys), or a filesystem path string.
        task: Key into TASK_PROMPTS selecting the prompt text.

    Returns:
        A single-turn chat message list: the image embedded as a base64 PNG
        data URI, followed by the task prompt text.

    Raises:
        ValueError: If the image type is unsupported, or a dict provides
            neither raw bytes nor a path.
    """
    # Convert to PIL Image if needed
    if isinstance(image, Image.Image):
        pil_img = image
    elif isinstance(image, dict):
        # HF datasets Image features decode to {"bytes": ..., "path": ...};
        # "bytes" can be None when the image is stored as a file reference.
        # The previous `"bytes" in image` check then crashed on BytesIO(None),
        # so fall back to opening the path when bytes are absent.
        if image.get("bytes"):
            pil_img = Image.open(io.BytesIO(image["bytes"]))
        elif image.get("path"):
            pil_img = Image.open(image["path"])
        else:
            raise ValueError(f"Image dict has neither bytes nor path: {list(image.keys())}")
    elif isinstance(image, str):
        pil_img = Image.open(image)
    else:
        raise ValueError(f"Unsupported image type: {type(image)}")

    # Convert to RGB (model expects 3-channel input; also flattens alpha/palette)
    pil_img = pil_img.convert("RGB")

    # Convert to base64 data URI (PNG is lossless, so no OCR-quality loss)
    buf = io.BytesIO()
    pil_img.save(buf, format="PNG")
    data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"

    prompt_text = TASK_PROMPTS.get(task, TASK_PROMPTS["ocr"])

    return [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": data_uri}},
                {"type": "text", "text": prompt_text},
            ],
        }
    ]
126
+
127
+
128
def create_dataset_card(
    source_dataset: str,
    model: str,
    num_samples: int,
    processing_time: str,
    batch_size: int,
    max_model_len: int,
    max_tokens: int,
    gpu_memory_utilization: float,
    temperature: float,
    top_p: float,
    task: str,
    image_column: str = "image",
    split: str = "train",
) -> str:
    """Create a dataset card documenting the OCR process.

    Returns the full card as a markdown string with YAML front matter,
    ready to push via huggingface_hub.DatasetCard.

    Args:
        source_dataset: Hub ID of the input dataset.
        model: Hub ID of the OCR model used.
        num_samples: Number of rows processed.
        processing_time: Human-readable elapsed time (already formatted).
        batch_size / max_model_len / max_tokens / gpu_memory_utilization /
        temperature / top_p: Inference configuration echoed into the card.
        task: Task mode key ("ocr", "formula", or "table").
        image_column: Name of the image column that was read.
        split: Dataset split that was processed.
    """
    # File-level import only brings in `datetime`, so pull timezone locally.
    from datetime import timezone

    model_name = model.split("/")[-1]
    # Human-readable task names; unknown tasks fall back to the raw key below.
    task_desc = {"ocr": "text recognition", "formula": "formula recognition", "table": "table recognition"}

    # Bug fix: the card labels the timestamp "UTC", so it must be generated
    # in UTC rather than server-local time.
    return f"""---
tags:
- ocr
- document-processing
- glm-ocr
- markdown
- uv-script
- generated
---

# Document OCR using {model_name}

This dataset contains OCR results from images in [{source_dataset}](https://huggingface.co/datasets/{source_dataset}) using GLM-OCR, a compact 0.9B OCR model achieving SOTA performance.

## Processing Details

- **Source Dataset**: [{source_dataset}](https://huggingface.co/datasets/{source_dataset})
- **Model**: [{model}](https://huggingface.co/{model})
- **Task**: {task_desc.get(task, task)}
- **Number of Samples**: {num_samples:,}
- **Processing Time**: {processing_time}
- **Processing Date**: {datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M UTC")}

### Configuration

- **Image Column**: `{image_column}`
- **Output Column**: `markdown`
- **Dataset Split**: `{split}`
- **Batch Size**: {batch_size}
- **Max Model Length**: {max_model_len:,} tokens
- **Max Output Tokens**: {max_tokens:,}
- **Temperature**: {temperature}
- **Top P**: {top_p}
- **GPU Memory Utilization**: {gpu_memory_utilization:.1%}

## Model Information

GLM-OCR is a compact, high-performance OCR model:
- 0.9B parameters
- 94.62% on OmniDocBench V1.5
- CogViT visual encoder + GLM-0.5B language decoder
- Multi-Token Prediction (MTP) loss for efficiency
- Multilingual: zh, en, fr, es, ru, de, ja, ko
- MIT licensed

## Dataset Structure

The dataset contains all original columns plus:
- `markdown`: The extracted text in markdown format
- `inference_info`: JSON list tracking all OCR models applied to this dataset

## Reproduction

```bash
uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/glm-ocr.py \\
    {source_dataset} \\
    <output-dataset> \\
    --image-column {image_column} \\
    --batch-size {batch_size} \\
    --task {task}
```

Generated with [UV Scripts](https://huggingface.co/uv-scripts)
"""
211
+
212
+
213
def main(
    input_dataset: str,
    output_dataset: str,
    image_column: str = "image",
    batch_size: int = 16,
    max_model_len: int = 8192,
    max_tokens: int = 16384,
    temperature: float = 0.01,
    top_p: float = 0.00001,
    repetition_penalty: float = 1.1,
    gpu_memory_utilization: float = 0.8,
    task: str = "ocr",
    hf_token: str | None = None,
    split: str = "train",
    max_samples: int | None = None,
    private: bool = False,
    shuffle: bool = False,
    seed: int = 42,
    output_column: str = "markdown",
) -> None:
    """Process images from HF dataset through GLM-OCR model.

    Pipeline: load the input dataset from the Hub, run every image through
    GLM-OCR via vLLM in batches, append the OCR text as `output_column`,
    track model provenance in an `inference_info` JSON column, then push the
    augmented dataset plus a generated dataset card to `output_dataset`.

    Exits the process (sys.exit) on missing CUDA or an unknown `task`;
    raises ValueError if `image_column` is absent from the dataset.
    """

    # Hard requirement: vLLM inference below needs a GPU.
    check_cuda_availability()

    start_time = datetime.now()

    # Explicit --hf-token wins over the HF_TOKEN environment variable.
    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    # Validate task
    if task not in TASK_PROMPTS:
        logger.error(f"Unknown task '{task}'. Supported: {list(TASK_PROMPTS.keys())}")
        sys.exit(1)

    logger.info(f"Using model: {MODEL}")
    logger.info(f"Task: {task} (prompt: '{TASK_PROMPTS[task]}')")

    # Load dataset
    logger.info(f"Loading dataset: {input_dataset}")
    dataset = load_dataset(input_dataset, split=split)

    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    # Optional shuffle BEFORE truncation so --max-samples draws a random subset.
    if shuffle:
        logger.info(f"Shuffling dataset with seed {seed}")
        dataset = dataset.shuffle(seed=seed)

    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info(f"Limited to {len(dataset)} samples")

    # Initialize vLLM
    logger.info("Initializing vLLM with GLM-OCR")
    logger.info("This may take a few minutes on first run...")
    llm = LLM(
        model=MODEL,
        trust_remote_code=True,
        max_model_len=max_model_len,
        gpu_memory_utilization=gpu_memory_utilization,
        # Each prompt carries exactly one image (see make_ocr_message).
        limit_mm_per_prompt={"image": 1},
    )

    # Sampling defaults from GLM-OCR SDK (github.com/zai-org/GLM-OCR)
    # glmocr/config.py PageLoaderConfig: temperature=0.01, top_p=0.00001,
    # top_k=1, repetition_penalty=1.1, max_tokens=16384
    # generation_config.json on HF also sets do_sample=false (greedy)
    sampling_params = SamplingParams(
        temperature=temperature,
        top_p=top_p,
        max_tokens=max_tokens,
        repetition_penalty=repetition_penalty,
    )

    logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
    logger.info(f"Output will be written to column: {output_column}")

    # One output string per dataset row, in row order (required by add_column).
    all_outputs = []

    for batch_indices in tqdm(
        partition_all(batch_size, range(len(dataset))),
        # Ceiling division: number of batches for the progress bar.
        total=(len(dataset) + batch_size - 1) // batch_size,
        desc="GLM-OCR processing",
    ):
        batch_indices = list(batch_indices)
        batch_images = [dataset[i][image_column] for i in batch_indices]

        try:
            batch_messages = [
                make_ocr_message(img, task=task)
                for img in batch_images
            ]

            # vLLM batches all messages in a single chat call.
            outputs = llm.chat(batch_messages, sampling_params)

            for output in outputs:
                text = output.outputs[0].text.strip()
                all_outputs.append(text)

        except Exception as e:
            # Best-effort: keep row alignment by emitting a placeholder for
            # every image in the failed batch instead of aborting the run.
            logger.error(f"Error processing batch: {e}")
            all_outputs.extend(["[OCR ERROR]"] * len(batch_images))

    processing_duration = datetime.now() - start_time
    processing_time_str = f"{processing_duration.total_seconds() / 60:.1f} min"

    logger.info(f"Adding '{output_column}' column to dataset")
    dataset = dataset.add_column(output_column, all_outputs)

    # Inference info tracking
    # Provenance record describing this OCR pass; stored as JSON so multiple
    # passes over the same dataset can accumulate in one column.
    inference_entry = {
        "model_id": MODEL,
        "model_name": "GLM-OCR",
        "column_name": output_column,
        "timestamp": datetime.now().isoformat(),
        "task": task,
        "temperature": temperature,
        "top_p": top_p,
        "repetition_penalty": repetition_penalty,
        "max_tokens": max_tokens,
    }

    if "inference_info" in dataset.column_names:
        logger.info("Updating existing inference_info column")

        def update_inference_info(example):
            # Append this run's entry to the row's existing history; treat
            # missing/corrupt JSON as an empty history rather than failing.
            try:
                existing_info = json.loads(example["inference_info"]) if example["inference_info"] else []
            except (json.JSONDecodeError, TypeError):
                existing_info = []
            existing_info.append(inference_entry)
            return {"inference_info": json.dumps(existing_info)}

        dataset = dataset.map(update_inference_info)
    else:
        logger.info("Creating new inference_info column")
        inference_list = [json.dumps([inference_entry])] * len(dataset)
        dataset = dataset.add_column("inference_info", inference_list)

    # Push to hub
    logger.info(f"Pushing to {output_dataset}")
    dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)

    # Create and push dataset card
    logger.info("Creating dataset card")
    card_content = create_dataset_card(
        source_dataset=input_dataset,
        model=MODEL,
        num_samples=len(dataset),
        processing_time=processing_time_str,
        batch_size=batch_size,
        max_model_len=max_model_len,
        max_tokens=max_tokens,
        gpu_memory_utilization=gpu_memory_utilization,
        temperature=temperature,
        top_p=top_p,
        task=task,
        image_column=image_column,
        split=split,
    )

    card = DatasetCard(card_content)
    card.push_to_hub(output_dataset, token=HF_TOKEN)

    logger.info("Done! GLM-OCR processing complete.")
    logger.info(f"Dataset available at: https://huggingface.co/datasets/{output_dataset}")
    logger.info(f"Processing time: {processing_time_str}")
    logger.info(f"Processing speed: {len(dataset) / processing_duration.total_seconds():.2f} images/sec")
384
+
385
+
386
if __name__ == "__main__":
    # With no CLI arguments, show a friendly quick-start guide (instead of
    # argparse's terse "missing arguments" error) and exit cleanly.
    # NOTE(review): leading whitespace inside these print strings may have
    # been collapsed by extraction — confirm against the original file.
    if len(sys.argv) == 1:
        print("=" * 70)
        print("GLM-OCR Document Processing")
        print("=" * 70)
        print("\n0.9B OCR model - 94.62% on OmniDocBench V1.5")
        print("\nTask modes:")
        print(" ocr - Text recognition (default)")
        print(" formula - LaTeX formula recognition")
        print(" table - Table extraction")
        print("\nExamples:")
        print("\n1. Basic OCR:")
        print(" uv run glm-ocr.py input-dataset output-dataset")
        print("\n2. Formula recognition:")
        print(" uv run glm-ocr.py docs results --task formula")
        print("\n3. Table extraction:")
        print(" uv run glm-ocr.py docs results --task table")
        print("\n4. Test with small sample:")
        print(" uv run glm-ocr.py large-dataset test --max-samples 10 --shuffle")
        print("\n5. Running on HF Jobs:")
        print(" hf jobs uv run --flavor l4x1 \\")
        print(" -s HF_TOKEN \\")
        print(" https://huggingface.co/datasets/uv-scripts/ocr/raw/main/glm-ocr.py \\")
        print(" input-dataset output-dataset --batch-size 16")
        print("\nFor full help: uv run glm-ocr.py --help")
        sys.exit(0)

    # RawDescriptionHelpFormatter keeps the epilog's hand-formatted layout.
    parser = argparse.ArgumentParser(
        description="Document OCR using GLM-OCR (0.9B, 94.62% OmniDocBench V1.5)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Task modes:
 ocr Text recognition to markdown (default)
 formula LaTeX formula recognition
 table Table extraction

Examples:
 uv run glm-ocr.py my-docs analyzed-docs
 uv run glm-ocr.py docs results --task formula
 uv run glm-ocr.py large-dataset test --max-samples 50 --shuffle
""",
    )

    # Required positional arguments: Hub IDs for input and output datasets.
    parser.add_argument("input_dataset", help="Input dataset ID from Hugging Face Hub")
    parser.add_argument("output_dataset", help="Output dataset ID for Hugging Face Hub")
    parser.add_argument(
        "--image-column",
        default="image",
        help="Column containing images (default: image)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=16,
        help="Batch size for processing (default: 16)",
    )
    parser.add_argument(
        "--max-model-len",
        type=int,
        default=8192,
        help="Maximum model context length (default: 8192)",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=16384,
        help="Maximum tokens to generate (default: 16384)",
    )
    # Sampling defaults mirror the GLM-OCR SDK (near-greedy decoding).
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.01,
        help="Sampling temperature (default: 0.01, near-greedy for OCR accuracy)",
    )
    parser.add_argument(
        "--top-p",
        type=float,
        default=0.00001,
        help="Top-p sampling parameter (default: 0.00001, near-greedy)",
    )
    parser.add_argument(
        "--repetition-penalty",
        type=float,
        default=1.1,
        help="Repetition penalty to prevent loops (default: 1.1)",
    )
    parser.add_argument(
        "--gpu-memory-utilization",
        type=float,
        default=0.8,
        help="GPU memory utilization (default: 0.8)",
    )
    parser.add_argument(
        "--task",
        choices=["ocr", "formula", "table"],
        default="ocr",
        help="OCR task mode (default: ocr)",
    )
    parser.add_argument("--hf-token", help="Hugging Face API token")
    parser.add_argument(
        "--split", default="train", help="Dataset split to use (default: train)"
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--private", action="store_true", help="Make output dataset private"
    )
    parser.add_argument(
        "--shuffle", action="store_true", help="Shuffle dataset before processing"
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="Random seed for shuffling (default: 42)",
    )
    parser.add_argument(
        "--output-column",
        default="markdown",
        help="Column name for output text (default: markdown)",
    )

    args = parser.parse_args()

    # Forward every CLI option to main() by keyword, one-to-one.
    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        image_column=args.image_column,
        batch_size=args.batch_size,
        max_model_len=args.max_model_len,
        max_tokens=args.max_tokens,
        temperature=args.temperature,
        top_p=args.top_p,
        repetition_penalty=args.repetition_penalty,
        gpu_memory_utilization=args.gpu_memory_utilization,
        task=args.task,
        hf_token=args.hf_token,
        split=args.split,
        max_samples=args.max_samples,
        private=args.private,
        shuffle=args.shuffle,
        seed=args.seed,
        output_column=args.output_column,
    )