davanstrien HF Staff Claude Opus 4.6 committed on
Commit
952354b
·
1 Parent(s): 0bc9b0a

Fix deepseek-ocr-vllm.py: use official vLLM offline pattern (P0)

Browse files

Rewrote inference core to match the official vLLM DeepSeek-OCR recipe:
- llm.generate() with PIL images instead of llm.chat() with base64
- Added NGramPerReqLogitsProcessor to prevent repetition loops
- Added enable_prefix_caching=False, mm_processor_cache_gb=0
- Added skip_special_tokens=False and ngram/window sampling params
- Removed resolution modes (only apply to transformers API, not vLLM)
- Removed deprecated HF_XET_HIGH_PERFORMANCE env var
- Bumped script_version to 2.0.0

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

Files changed (1) hide show
  1. deepseek-ocr-vllm.py +59 -193
deepseek-ocr-vllm.py CHANGED
@@ -23,11 +23,14 @@ Convert document images to markdown using DeepSeek-OCR with vLLM.
23
  This script processes images through the DeepSeek-OCR model to extract
24
  text and structure as markdown, using vLLM for efficient batch processing.
25
 
26
- NOTE: Uses vLLM nightly wheels from main (PR #27247 now merged). First run
27
- may take a few minutes to download and install dependencies.
 
 
 
 
28
 
29
  Features:
30
- - Multiple resolution modes (Tiny/Small/Base/Large/Gundam)
31
  - LaTeX equation recognition
32
  - Table extraction and formatting
33
  - Document structure preservation
@@ -37,14 +40,13 @@ Features:
37
  """
38
 
39
  import argparse
40
- import base64
41
  import io
42
  import json
43
  import logging
44
  import os
45
  import sys
46
- from typing import Any, Dict, List, Union
47
  from datetime import datetime
 
48
 
49
  import torch
50
  from datasets import load_dataset
@@ -53,23 +55,11 @@ from PIL import Image
53
  from toolz import partition_all
54
  from tqdm.auto import tqdm
55
  from vllm import LLM, SamplingParams
 
56
 
57
  logging.basicConfig(level=logging.INFO)
58
  logger = logging.getLogger(__name__)
59
 
60
- # Resolution mode presets
61
- RESOLUTION_MODES = {
62
- "tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
63
- "small": {"base_size": 640, "image_size": 640, "crop_mode": False},
64
- "base": {"base_size": 1024, "image_size": 1024, "crop_mode": False},
65
- "large": {"base_size": 1280, "image_size": 1280, "crop_mode": False},
66
- "gundam": {
67
- "base_size": 1024,
68
- "image_size": 640,
69
- "crop_mode": True,
70
- }, # Dynamic resolution
71
- }
72
-
73
  # Prompt mode presets (from DeepSeek-OCR GitHub)
74
  PROMPT_MODES = {
75
  "document": "<image>\n<|grounding|>Convert the document to markdown.",
@@ -90,40 +80,17 @@ def check_cuda_availability():
90
  logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
91
 
92
 
93
- def make_ocr_message(
94
- image: Union[Image.Image, Dict[str, Any], str],
95
- prompt: str = "<image>\n<|grounding|>Convert the document to markdown. ",
96
- ) -> List[Dict]:
97
- """Create chat message for OCR processing."""
98
- # Convert to PIL Image if needed
99
  if isinstance(image, Image.Image):
100
- pil_img = image
101
  elif isinstance(image, dict) and "bytes" in image:
102
- pil_img = Image.open(io.BytesIO(image["bytes"]))
103
  elif isinstance(image, str):
104
- pil_img = Image.open(image)
105
  else:
106
  raise ValueError(f"Unsupported image type: {type(image)}")
107
 
108
- # Convert to RGB
109
- pil_img = pil_img.convert("RGB")
110
-
111
- # Convert to base64 data URI
112
- buf = io.BytesIO()
113
- pil_img.save(buf, format="PNG")
114
- data_uri = f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode()}"
115
-
116
- # Return message in vLLM format
117
- return [
118
- {
119
- "role": "user",
120
- "content": [
121
- {"type": "image_url", "image_url": {"url": data_uri}},
122
- {"type": "text", "text": prompt},
123
- ],
124
- }
125
- ]
126
-
127
 
128
  def create_dataset_card(
129
  source_dataset: str,
@@ -134,10 +101,6 @@ def create_dataset_card(
134
  max_model_len: int,
135
  max_tokens: int,
136
  gpu_memory_utilization: float,
137
- resolution_mode: str,
138
- base_size: int,
139
- image_size: int,
140
- crop_mode: bool,
141
  image_column: str = "image",
142
  split: str = "train",
143
  ) -> str:
@@ -173,10 +136,6 @@ This dataset contains markdown-formatted OCR results from images in [{source_dat
173
  - **Output Column**: `markdown`
174
  - **Dataset Split**: `{split}`
175
  - **Batch Size**: {batch_size}
176
- - **Resolution Mode**: {resolution_mode}
177
- - **Base Size**: {base_size}
178
- - **Image Size**: {image_size}
179
- - **Crop Mode**: {crop_mode}
180
  - **Max Model Length**: {max_model_len:,} tokens
181
  - **Max Output Tokens**: {max_tokens:,}
182
  - **GPU Memory Utilization**: {gpu_memory_utilization:.1%}
@@ -184,20 +143,12 @@ This dataset contains markdown-formatted OCR results from images in [{source_dat
184
  ## Model Information
185
 
186
  DeepSeek-OCR is a state-of-the-art document OCR model that excels at:
187
- - 📐 **LaTeX equations** - Mathematical formulas preserved in LaTeX format
188
- - 📊 **Tables** - Extracted and formatted as HTML/markdown
189
- - 📝 **Document structure** - Headers, lists, and formatting maintained
190
- - 🖼️ **Image grounding** - Spatial layout and bounding box information
191
- - 🔍 **Complex layouts** - Multi-column and hierarchical structures
192
- - 🌍 **Multilingual** - Supports multiple languages
193
-
194
- ### Resolution Modes
195
-
196
- - **Tiny** (512×512): Fast processing, 64 vision tokens
197
- - **Small** (640×640): Balanced speed/quality, 100 vision tokens
198
- - **Base** (1024×1024): High quality, 256 vision tokens
199
- - **Large** (1280×1280): Maximum quality, 400 vision tokens
200
- - **Gundam** (dynamic): Adaptive multi-tile processing for large documents
201
 
202
  ## Dataset Structure
203
 
@@ -233,7 +184,6 @@ This dataset was generated using the [uv-scripts/ocr](https://huggingface.co/dat
233
  uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py \\\\
234
  {source_dataset} \\\\
235
  <output-dataset> \\\\
236
- --resolution-mode {resolution_mode} \\\\
237
  --image-column {image_column}
238
  ```
239
 
@@ -242,7 +192,7 @@ uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm
242
  - **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.1f} images/second
243
  - **Processing Method**: Batch processing with vLLM (2-3x speedup over sequential)
244
 
245
- Generated with 🤖 [UV Scripts](https://huggingface.co/uv-scripts)
246
  """
247
 
248
 
@@ -250,12 +200,8 @@ def main(
250
  input_dataset: str,
251
  output_dataset: str,
252
  image_column: str = "image",
253
- batch_size: int = 8, # Smaller batch size to avoid potential memory issues with DeepSeek-OCR
254
  model: str = "deepseek-ai/DeepSeek-OCR",
255
- resolution_mode: str = "gundam",
256
- base_size: int = None,
257
- image_size: int = None,
258
- crop_mode: bool = None,
259
  max_model_len: int = 8192,
260
  max_tokens: int = 8192,
261
  gpu_memory_utilization: float = 0.8,
@@ -276,45 +222,11 @@ def main(
276
  # Track processing start time
277
  start_time = datetime.now()
278
 
279
- # Enable high-performance Xet downloads
280
- os.environ["HF_XET_HIGH_PERFORMANCE"] = "1"
281
-
282
  # Login to HF if token provided
283
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
284
  if HF_TOKEN:
285
  login(token=HF_TOKEN)
286
 
287
- # Determine resolution settings
288
- if resolution_mode in RESOLUTION_MODES:
289
- mode_config = RESOLUTION_MODES[resolution_mode]
290
- final_base_size = (
291
- base_size if base_size is not None else mode_config["base_size"]
292
- )
293
- final_image_size = (
294
- image_size if image_size is not None else mode_config["image_size"]
295
- )
296
- final_crop_mode = (
297
- crop_mode if crop_mode is not None else mode_config["crop_mode"]
298
- )
299
- logger.info(f"Using resolution mode: {resolution_mode}")
300
- else:
301
- # Custom mode - require all parameters
302
- if base_size is None or image_size is None or crop_mode is None:
303
- raise ValueError(
304
- f"Invalid resolution mode '{resolution_mode}'. "
305
- f"Use one of {list(RESOLUTION_MODES.keys())} or specify "
306
- f"--base-size, --image-size, and --crop-mode manually."
307
- )
308
- final_base_size = base_size
309
- final_image_size = image_size
310
- final_crop_mode = crop_mode
311
- resolution_mode = "custom"
312
-
313
- logger.info(
314
- f"Resolution: base_size={final_base_size}, "
315
- f"image_size={final_image_size}, crop_mode={final_crop_mode}"
316
- )
317
-
318
  # Determine prompt
319
  if prompt is not None:
320
  final_prompt = prompt
@@ -350,31 +262,34 @@ def main(
350
  dataset = dataset.select(range(min(max_samples, len(dataset))))
351
  logger.info(f"Limited to {len(dataset)} samples")
352
 
353
- # Initialize vLLM
354
  logger.info(f"Initializing vLLM with model: {model}")
355
  logger.info("This may take a few minutes on first run...")
356
 
357
- # Add specific parameters for DeepSeek-OCR compatibility
358
  llm = LLM(
359
  model=model,
360
  trust_remote_code=True,
361
  max_model_len=max_model_len,
362
  gpu_memory_utilization=gpu_memory_utilization,
363
- limit_mm_per_prompt={"image": 1},
364
- enforce_eager=False, # Use torch.compile instead of eager execution
 
365
  )
366
 
367
  sampling_params = SamplingParams(
368
- temperature=0.0, # Deterministic for OCR
369
  max_tokens=max_tokens,
 
 
 
 
 
 
370
  )
371
 
372
  logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
373
- logger.info(
374
- "Using vLLM for batch processing - should be faster than sequential processing"
375
- )
376
 
377
- # Process images in batches
378
  all_markdown = []
379
 
380
  for batch_indices in tqdm(
@@ -386,11 +301,16 @@ def main(
386
  batch_images = [dataset[i][image_column] for i in batch_indices]
387
 
388
  try:
389
- # Create messages for batch
390
- batch_messages = [make_ocr_message(img, final_prompt) for img in batch_images]
 
 
 
 
 
391
 
392
- # Process with vLLM
393
- outputs = llm.chat(batch_messages, sampling_params)
394
 
395
  # Extract outputs
396
  for output in outputs:
@@ -399,7 +319,6 @@ def main(
399
 
400
  except Exception as e:
401
  logger.error(f"Error processing batch: {e}")
402
- # Add error placeholders for failed batch
403
  all_markdown.extend(["[OCR FAILED]"] * len(batch_images))
404
 
405
  # Calculate processing time
@@ -432,10 +351,6 @@ def main(
432
  "column_name": "markdown",
433
  "model_id": model,
434
  "processing_date": datetime.now().isoformat(),
435
- "resolution_mode": resolution_mode,
436
- "base_size": final_base_size,
437
- "image_size": final_image_size,
438
- "crop_mode": final_crop_mode,
439
  "prompt": final_prompt,
440
  "prompt_mode": prompt_mode if prompt is None else "custom",
441
  "batch_size": batch_size,
@@ -443,9 +358,9 @@ def main(
443
  "gpu_memory_utilization": gpu_memory_utilization,
444
  "max_model_len": max_model_len,
445
  "script": "deepseek-ocr-vllm.py",
446
- "script_version": "1.0.0",
447
  "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py",
448
- "implementation": "vllm (batch processing)",
449
  }
450
  existing_info.append(new_info)
451
 
@@ -468,10 +383,6 @@ def main(
468
  max_model_len=max_model_len,
469
  max_tokens=max_tokens,
470
  gpu_memory_utilization=gpu_memory_utilization,
471
- resolution_mode=resolution_mode,
472
- base_size=final_base_size,
473
- image_size=final_image_size,
474
- crop_mode=final_crop_mode,
475
  image_column=image_column,
476
  split=split,
477
  )
@@ -496,37 +407,28 @@ if __name__ == "__main__":
496
  print("\nThis script converts document images to markdown using")
497
  print("DeepSeek-OCR with vLLM for efficient batch processing.")
498
  print("\nFeatures:")
499
- print("- Multiple resolution modes (Tiny/Small/Base/Large/Gundam)")
500
  print("- LaTeX equation recognition")
501
  print("- Table extraction and formatting")
502
  print("- Document structure preservation")
503
  print("- Image grounding and spatial layout")
504
  print("- Multilingual support")
505
- print("- Fast batch processing with vLLM (2-3x speedup)")
506
  print("\nExample usage:")
507
- print("\n1. Basic OCR conversion (Gundam mode - dynamic resolution):")
508
  print(" uv run deepseek-ocr-vllm.py document-images markdown-docs")
509
- print("\n2. High quality mode (Large - 1280×1280):")
510
  print(
511
- " uv run deepseek-ocr-vllm.py scanned-pdfs extracted-text --resolution-mode large"
512
  )
513
- print("\n3. Fast processing (Tiny - 512×512):")
514
- print(" uv run deepseek-ocr-vllm.py quick-test output --resolution-mode tiny")
515
- print("\n4. Parse figures from documents:")
516
- print(" uv run deepseek-ocr-vllm.py scientific-papers figures --prompt-mode figure")
517
- print("\n5. Free OCR without layout:")
518
  print(" uv run deepseek-ocr-vllm.py images text --prompt-mode free")
519
- print("\n6. Process a subset for testing:")
520
  print(
521
  " uv run deepseek-ocr-vllm.py large-dataset test-output --max-samples 10"
522
  )
523
- print("\n7. Custom resolution:")
524
- print(" uv run deepseek-ocr-vllm.py dataset output \\")
525
- print(" --base-size 1024 --image-size 640 --crop-mode")
526
- print("\n8. Running on HF Jobs:")
527
  print(" hf jobs uv run --flavor l4x1 \\")
528
  print(" -s HF_TOKEN \\")
529
- print(" -e UV_TORCH_BACKEND=auto \\")
530
  print(
531
  " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py \\"
532
  )
@@ -540,13 +442,6 @@ if __name__ == "__main__":
540
  description="OCR images to markdown using DeepSeek-OCR (vLLM)",
541
  formatter_class=argparse.RawDescriptionHelpFormatter,
542
  epilog="""
543
- Resolution Modes:
544
- tiny 512×512 pixels, fast processing (64 vision tokens)
545
- small 640×640 pixels, balanced (100 vision tokens)
546
- base 1024×1024 pixels, high quality (256 vision tokens)
547
- large 1280×1280 pixels, maximum quality (400 vision tokens)
548
- gundam Dynamic multi-tile processing (adaptive)
549
-
550
  Prompt Modes:
551
  document Convert document to markdown with grounding (default)
552
  image OCR any image with grounding
@@ -555,29 +450,25 @@ Prompt Modes:
555
  describe Generate detailed image descriptions
556
 
557
  Examples:
558
- # Basic usage with default Gundam mode
559
  uv run deepseek-ocr-vllm.py my-images-dataset ocr-results
560
 
561
- # High quality processing
562
- uv run deepseek-ocr-vllm.py documents extracted-text --resolution-mode large
563
-
564
- # Fast processing for testing
565
- uv run deepseek-ocr-vllm.py dataset output --resolution-mode tiny --max-samples 100
566
-
567
  # Parse figures from a document dataset
568
  uv run deepseek-ocr-vllm.py scientific-papers figures --prompt-mode figure
569
 
570
- # Free OCR without layout (fastest)
571
  uv run deepseek-ocr-vllm.py images text --prompt-mode free
572
 
573
  # Custom prompt for specific task
574
- uv run deepseek-ocr-vllm.py dataset output --prompt "<image>\nExtract all table data."
575
-
576
- # Custom resolution settings
577
- uv run deepseek-ocr-vllm.py dataset output --base-size 1024 --image-size 640 --crop-mode
578
 
579
  # With custom batch size for performance tuning
580
  uv run deepseek-ocr-vllm.py dataset output --batch-size 16 --max-model-len 16384
 
 
 
 
 
581
  """,
582
  )
583
 
@@ -599,27 +490,6 @@ Examples:
599
  default="deepseek-ai/DeepSeek-OCR",
600
  help="Model to use (default: deepseek-ai/DeepSeek-OCR)",
601
  )
602
- parser.add_argument(
603
- "--resolution-mode",
604
- default="gundam",
605
- choices=list(RESOLUTION_MODES.keys()) + ["custom"],
606
- help="Resolution mode preset (default: gundam)",
607
- )
608
- parser.add_argument(
609
- "--base-size",
610
- type=int,
611
- help="Base resolution size (overrides resolution-mode)",
612
- )
613
- parser.add_argument(
614
- "--image-size",
615
- type=int,
616
- help="Image tile size (overrides resolution-mode)",
617
- )
618
- parser.add_argument(
619
- "--crop-mode",
620
- action="store_true",
621
- help="Enable dynamic multi-tile cropping (overrides resolution-mode)",
622
- )
623
  parser.add_argument(
624
  "--max-model-len",
625
  type=int,
@@ -680,10 +550,6 @@ Examples:
680
  image_column=args.image_column,
681
  batch_size=args.batch_size,
682
  model=args.model,
683
- resolution_mode=args.resolution_mode,
684
- base_size=args.base_size,
685
- image_size=args.image_size,
686
- crop_mode=args.crop_mode if args.crop_mode else None,
687
  max_model_len=args.max_model_len,
688
  max_tokens=args.max_tokens,
689
  gpu_memory_utilization=args.gpu_memory_utilization,
 
23
  This script processes images through the DeepSeek-OCR model to extract
24
  text and structure as markdown, using vLLM for efficient batch processing.
25
 
26
+ Uses the official vLLM offline pattern: llm.generate() with PIL images
27
+ and NGramPerReqLogitsProcessor to prevent repetition on complex documents.
28
+ See: https://docs.vllm.ai/projects/recipes/en/latest/DeepSeek/DeepSeek-OCR.html
29
+
30
+ NOTE: Uses vLLM nightly wheels. First run may take a few minutes to download
31
+ and install dependencies.
32
 
33
  Features:
 
34
  - LaTeX equation recognition
35
  - Table extraction and formatting
36
  - Document structure preservation
 
40
  """
41
 
42
  import argparse
 
43
  import io
44
  import json
45
  import logging
46
  import os
47
  import sys
 
48
  from datetime import datetime
49
+ from typing import Any, Dict, Union
50
 
51
  import torch
52
  from datasets import load_dataset
 
55
  from toolz import partition_all
56
  from tqdm.auto import tqdm
57
  from vllm import LLM, SamplingParams
58
+ from vllm.model_executor.models.deepseek_ocr import NGramPerReqLogitsProcessor
59
 
60
  logging.basicConfig(level=logging.INFO)
61
  logger = logging.getLogger(__name__)
62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  # Prompt mode presets (from DeepSeek-OCR GitHub)
64
  PROMPT_MODES = {
65
  "document": "<image>\n<|grounding|>Convert the document to markdown.",
 
80
  logger.info(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
81
 
82
 
83
+ def to_pil(image: Union[Image.Image, Dict[str, Any], str]) -> Image.Image:
84
+ """Convert various image formats to PIL Image."""
 
 
 
 
85
  if isinstance(image, Image.Image):
86
+ return image
87
  elif isinstance(image, dict) and "bytes" in image:
88
+ return Image.open(io.BytesIO(image["bytes"]))
89
  elif isinstance(image, str):
90
+ return Image.open(image)
91
  else:
92
  raise ValueError(f"Unsupported image type: {type(image)}")
93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
 
95
  def create_dataset_card(
96
  source_dataset: str,
 
101
  max_model_len: int,
102
  max_tokens: int,
103
  gpu_memory_utilization: float,
 
 
 
 
104
  image_column: str = "image",
105
  split: str = "train",
106
  ) -> str:
 
136
  - **Output Column**: `markdown`
137
  - **Dataset Split**: `{split}`
138
  - **Batch Size**: {batch_size}
 
 
 
 
139
  - **Max Model Length**: {max_model_len:,} tokens
140
  - **Max Output Tokens**: {max_tokens:,}
141
  - **GPU Memory Utilization**: {gpu_memory_utilization:.1%}
 
143
  ## Model Information
144
 
145
  DeepSeek-OCR is a state-of-the-art document OCR model that excels at:
146
+ - LaTeX equations - Mathematical formulas preserved in LaTeX format
147
+ - Tables - Extracted and formatted as HTML/markdown
148
+ - Document structure - Headers, lists, and formatting maintained
149
+ - Image grounding - Spatial layout and bounding box information
150
+ - Complex layouts - Multi-column and hierarchical structures
151
+ - Multilingual - Supports multiple languages
 
 
 
 
 
 
 
 
152
 
153
  ## Dataset Structure
154
 
 
184
  uv run https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py \\\\
185
  {source_dataset} \\\\
186
  <output-dataset> \\\\
 
187
  --image-column {image_column}
188
  ```
189
 
 
192
  - **Processing Speed**: ~{num_samples / (float(processing_time.split()[0]) * 60):.1f} images/second
193
  - **Processing Method**: Batch processing with vLLM (2-3x speedup over sequential)
194
 
195
+ Generated with [UV Scripts](https://huggingface.co/uv-scripts)
196
  """
197
 
198
 
 
200
  input_dataset: str,
201
  output_dataset: str,
202
  image_column: str = "image",
203
+ batch_size: int = 8,
204
  model: str = "deepseek-ai/DeepSeek-OCR",
 
 
 
 
205
  max_model_len: int = 8192,
206
  max_tokens: int = 8192,
207
  gpu_memory_utilization: float = 0.8,
 
222
  # Track processing start time
223
  start_time = datetime.now()
224
 
 
 
 
225
  # Login to HF if token provided
226
  HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
227
  if HF_TOKEN:
228
  login(token=HF_TOKEN)
229
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
230
  # Determine prompt
231
  if prompt is not None:
232
  final_prompt = prompt
 
262
  dataset = dataset.select(range(min(max_samples, len(dataset))))
263
  logger.info(f"Limited to {len(dataset)} samples")
264
 
265
+ # Initialize vLLM (matches official DeepSeek-OCR vLLM recipe)
266
  logger.info(f"Initializing vLLM with model: {model}")
267
  logger.info("This may take a few minutes on first run...")
268
 
 
269
  llm = LLM(
270
  model=model,
271
  trust_remote_code=True,
272
  max_model_len=max_model_len,
273
  gpu_memory_utilization=gpu_memory_utilization,
274
+ enable_prefix_caching=False,
275
+ mm_processor_cache_gb=0,
276
+ logits_processors=[NGramPerReqLogitsProcessor],
277
  )
278
 
279
  sampling_params = SamplingParams(
280
+ temperature=0.0,
281
  max_tokens=max_tokens,
282
+ skip_special_tokens=False,
283
+ extra_args=dict(
284
+ ngram_size=30,
285
+ window_size=90,
286
+ whitelist_token_ids={128821, 128822},
287
+ ),
288
  )
289
 
290
  logger.info(f"Processing {len(dataset)} images in batches of {batch_size}")
 
 
 
291
 
292
+ # Process images in batches using llm.generate() with PIL images
293
  all_markdown = []
294
 
295
  for batch_indices in tqdm(
 
301
  batch_images = [dataset[i][image_column] for i in batch_indices]
302
 
303
  try:
304
+ # Build model inputs with PIL images (official vLLM pattern)
305
+ model_inputs = []
306
+ for img in batch_images:
307
+ pil_img = to_pil(img).convert("RGB")
308
+ model_inputs.append(
309
+ {"prompt": final_prompt, "multi_modal_data": {"image": pil_img}}
310
+ )
311
 
312
+ # Process with vLLM generate API
313
+ outputs = llm.generate(model_inputs, sampling_params)
314
 
315
  # Extract outputs
316
  for output in outputs:
 
319
 
320
  except Exception as e:
321
  logger.error(f"Error processing batch: {e}")
 
322
  all_markdown.extend(["[OCR FAILED]"] * len(batch_images))
323
 
324
  # Calculate processing time
 
351
  "column_name": "markdown",
352
  "model_id": model,
353
  "processing_date": datetime.now().isoformat(),
 
 
 
 
354
  "prompt": final_prompt,
355
  "prompt_mode": prompt_mode if prompt is None else "custom",
356
  "batch_size": batch_size,
 
358
  "gpu_memory_utilization": gpu_memory_utilization,
359
  "max_model_len": max_model_len,
360
  "script": "deepseek-ocr-vllm.py",
361
+ "script_version": "2.0.0",
362
  "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py",
363
+ "implementation": "vllm (batch processing, llm.generate + NGramPerReqLogitsProcessor)",
364
  }
365
  existing_info.append(new_info)
366
 
 
383
  max_model_len=max_model_len,
384
  max_tokens=max_tokens,
385
  gpu_memory_utilization=gpu_memory_utilization,
 
 
 
 
386
  image_column=image_column,
387
  split=split,
388
  )
 
407
  print("\nThis script converts document images to markdown using")
408
  print("DeepSeek-OCR with vLLM for efficient batch processing.")
409
  print("\nFeatures:")
 
410
  print("- LaTeX equation recognition")
411
  print("- Table extraction and formatting")
412
  print("- Document structure preservation")
413
  print("- Image grounding and spatial layout")
414
  print("- Multilingual support")
415
+ print("- Fast batch processing with vLLM")
416
  print("\nExample usage:")
417
+ print("\n1. Basic OCR conversion (document mode with grounding):")
418
  print(" uv run deepseek-ocr-vllm.py document-images markdown-docs")
419
+ print("\n2. Parse figures from documents:")
420
  print(
421
+ " uv run deepseek-ocr-vllm.py scientific-papers figures --prompt-mode figure"
422
  )
423
+ print("\n3. Free OCR without layout:")
 
 
 
 
424
  print(" uv run deepseek-ocr-vllm.py images text --prompt-mode free")
425
+ print("\n4. Process a subset for testing:")
426
  print(
427
  " uv run deepseek-ocr-vllm.py large-dataset test-output --max-samples 10"
428
  )
429
+ print("\n5. Running on HF Jobs:")
 
 
 
430
  print(" hf jobs uv run --flavor l4x1 \\")
431
  print(" -s HF_TOKEN \\")
 
432
  print(
433
  " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py \\"
434
  )
 
442
  description="OCR images to markdown using DeepSeek-OCR (vLLM)",
443
  formatter_class=argparse.RawDescriptionHelpFormatter,
444
  epilog="""
 
 
 
 
 
 
 
445
  Prompt Modes:
446
  document Convert document to markdown with grounding (default)
447
  image OCR any image with grounding
 
450
  describe Generate detailed image descriptions
451
 
452
  Examples:
453
+ # Basic usage
454
  uv run deepseek-ocr-vllm.py my-images-dataset ocr-results
455
 
 
 
 
 
 
 
456
  # Parse figures from a document dataset
457
  uv run deepseek-ocr-vllm.py scientific-papers figures --prompt-mode figure
458
 
459
+ # Free OCR without layout
460
  uv run deepseek-ocr-vllm.py images text --prompt-mode free
461
 
462
  # Custom prompt for specific task
463
+ uv run deepseek-ocr-vllm.py dataset output --prompt "<image>\\nExtract all table data."
 
 
 
464
 
465
  # With custom batch size for performance tuning
466
  uv run deepseek-ocr-vllm.py dataset output --batch-size 16 --max-model-len 16384
467
+
468
+ # Running on HF Jobs
469
+ hf jobs uv run --flavor l4x1 -s HF_TOKEN \\
470
+ https://huggingface.co/datasets/uv-scripts/ocr/raw/main/deepseek-ocr-vllm.py \\
471
+ my-dataset my-output --max-samples 10
472
  """,
473
  )
474
 
 
490
  default="deepseek-ai/DeepSeek-OCR",
491
  help="Model to use (default: deepseek-ai/DeepSeek-OCR)",
492
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
493
  parser.add_argument(
494
  "--max-model-len",
495
  type=int,
 
550
  image_column=args.image_column,
551
  batch_size=args.batch_size,
552
  model=args.model,
 
 
 
 
553
  max_model_len=args.max_model_len,
554
  max_tokens=args.max_tokens,
555
  gpu_memory_utilization=args.gpu_memory_utilization,