eitans-pu committed
Commit 2ed8bde · verified · 1 Parent(s): 60eb65a

Upload generate-responses.py with huggingface_hub

Files changed (1)
  1. generate-responses.py +5 -3
generate-responses.py CHANGED
@@ -2,6 +2,7 @@
 # requires-python = ">=3.10"
 # dependencies = [
 #     "datasets",
+#     "hf_transfer",
 #     "huggingface-hub",
 #     "hf-xet>=1.1.7",
 #     "torch",
@@ -231,15 +232,16 @@ def main(
 
     # Initialize vLLM
     logger.info(f"Loading model: {model_id}")
+    if max_model_len is None:
+        max_model_len = 16384  # sensible default; most prompts are <8K tokens
     vllm_kwargs = {
         "model": model_id,
         "tensor_parallel_size": tensor_parallel_size,
         "gpu_memory_utilization": gpu_memory_utilization,
         "enable_prefix_caching": True,
+        "max_model_len": max_model_len,
     }
-    if max_model_len is not None:
-        vllm_kwargs["max_model_len"] = max_model_len
-        logger.info(f"Using max_model_len={max_model_len}")
+    logger.info(f"Using max_model_len={max_model_len}")
 
     llm = LLM(**vllm_kwargs)
     logger.info("Prefix caching enabled (system prompt shared across all cases)")
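A note on the new hf_transfer dependency in the first hunk: huggingface_hub only uses it when the HF_HUB_ENABLE_HF_TRANSFER=1 environment variable is set before the first download call. A minimal sketch of enabling it (the repo id is a placeholder, not from this script):

import os

# Must be set before huggingface_hub performs any download.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from huggingface_hub import snapshot_download

# Placeholder repo id; with hf_transfer installed and the flag set,
# large files are fetched over parallel streams.
snapshot_download("Qwen/Qwen2.5-7B-Instruct")

Since the script also pins hf-xet, repos on Xet-backed storage presumably take that download path instead; hf_transfer covers the classic HTTP/LFS case.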
 
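Why the second hunk always pins max_model_len instead of setting it only when given: vLLM sizes its KV cache from the model's maximum context length, so inheriting a large native window (e.g. 128K) can exhaust GPU memory even when prompts stay well under 8K tokens. A minimal sketch of the resulting initialization, with placeholder model and parallelism values (the committed script passes model_id and the other arguments through main):

from vllm import LLM, SamplingParams

max_model_len = 16384  # the commit's default when the caller leaves it unset

llm = LLM(
    model="Qwen/Qwen2.5-7B-Instruct",  # placeholder; the script uses model_id
    tensor_parallel_size=1,
    gpu_memory_utilization=0.9,
    enable_prefix_caching=True,  # shared system prompt's KV cache is reused
    max_model_len=max_model_len,
)

outputs = llm.generate(["Hello"], SamplingParams(max_tokens=32))
print(outputs[0].outputs[0].text)

Moving max_model_len into vllm_kwargs unconditionally also lets the log line run outside the old if-branch, so the effective context length is reported whether it came from the caller or the default.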