yusufs committed
Commit d51e450 · 1 Parent(s): 84c6c4a

fix(half-precision): use half precision for T4

Files changed (1)
  1. main.py +8 -2
main.py CHANGED
@@ -52,7 +52,13 @@ engine_llama_3_2: LLM = LLM(
  # https://github.com/vllm-project/vllm/blob/v0.6.4/vllm/config.py#L98-L102
  # max_model_len=32768,
  enforce_eager=True, # Disable CUDA graph
- dtype='auto', # Use 'half' if you want half precision
+
+ # File "/home/user/.local/lib/python3.12/site-packages/vllm/worker/worker.py",
+ # line 479, in _check_if_gpu_supports_dtype
+ # Bfloat16 is only supported on GPUs with compute capability of at least 8.0.
+ # Your Tesla T4 GPU has compute capability 7.5.
+ # You can use float16 instead by explicitly setting the `dtype` flag in CLI, for example: --dtype=half.
+ dtype='half', # Use 'half' for T4
  )

  # ValueError: max_num_batched_tokens (512) is smaller than max_model_len (32768).
@@ -67,7 +73,7 @@ engine_sailor_chat: LLM = LLM(
  tensor_parallel_size=1,
  # max_model_len=32768,
  enforce_eager=True, # Disable CUDA graph
- dtype='auto', # Use 'half' if you want half precision
+ dtype='half', # Use 'half' for T4
  )

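Not part of this commit, but a minimal sketch of how the dtype choice could be made at runtime instead of hard-coding 'half', based on the compute-capability rule quoted in the vLLM error above. The helper name pick_dtype and the model id are hypothetical assumptions, not taken from this repo:

import torch
from vllm import LLM

def pick_dtype() -> str:
    # Bfloat16 needs compute capability >= 8.0 (Ampere or newer);
    # a Tesla T4 reports 7.5, so fall back to float16 ('half') there.
    if torch.cuda.is_available():
        major, _minor = torch.cuda.get_device_capability()
        return 'auto' if major >= 8 else 'half'
    return 'auto'

engine: LLM = LLM(
    model='meta-llama/Llama-3.2-1B-Instruct',  # placeholder model id, not from this diff
    tensor_parallel_size=1,
    enforce_eager=True,  # Disable CUDA graph
    dtype=pick_dtype(),
)

With this guard the same script keeps dtype='auto' (and thus bfloat16 where supported) on Ampere-class GPUs while still starting cleanly on a T4.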