davanstrien HF Staff committed on
Commit c7d345d · 1 Parent(s): 9be50d6

Update PaddleOCR-VL configuration for vLLM compatibility and enhance model parameters

Files changed (1)
  1. paddleocr-vl.py +8 -0
paddleocr-vl.py CHANGED
@@ -9,6 +9,7 @@
 # "toolz",
 # "torch",
 # "pyarrow",
+# "transformers",
 # ]
 #
 # [[tool.uv.index]]
@@ -388,12 +389,19 @@ def main(
     logger.info(f"Initializing vLLM with {model_name}")
     logger.info("This may take a minute on first run (model is only 0.9B)...")
 
+    # Note: PaddleOCR-VL requires specific vLLM configuration
+    # The model needs custom implementation files to be loaded
+    os.environ["VLLM_USE_V1"] = "0"  # Disable V1 engine for compatibility
+
     llm = LLM(
         model=model_name,
         trust_remote_code=True,
         max_model_len=max_model_len,
         gpu_memory_utilization=gpu_memory_utilization,
         limit_mm_per_prompt={"image": 1},
+        max_num_batched_tokens=16384,  # Match server config
+        enable_prefix_caching=False,  # Disable prefix caching like server
+        enforce_eager=True,  # Use eager mode instead of CUDA graphs
     )
 
     # Sampling parameters - deterministic for OCR
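
For context on how these engine settings are exercised downstream (the hunk's trailing context line mentions deterministic sampling for OCR), here is a minimal sketch of a single-image request against the llm built above. The image path, prompt string, and max_tokens value are illustrative assumptions, not taken from the script; the real code presumably formats the prompt via the model's chat template.

    from PIL import Image
    from vllm import SamplingParams

    # Deterministic decoding for OCR: temperature 0.0 selects greedy sampling
    sampling_params = SamplingParams(temperature=0.0, max_tokens=512)

    # One image per request, consistent with limit_mm_per_prompt={"image": 1}
    image = Image.open("page.png").convert("RGB")  # hypothetical input file

    outputs = llm.generate(
        {
            # Placeholder prompt; the actual PaddleOCR-VL prompt format is an
            # assumption here, not something shown in the diff
            "prompt": "<|image|>Recognize all text in this image.",
            "multi_modal_data": {"image": image},
        },
        sampling_params,
    )
    print(outputs[0].outputs[0].text)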