Commit e5530f0

use small max_num_batched_tokens for A100

Signed-off-by: KuntaiDu <kuntai@uchicago.edu>
1 parent: 3d1e387

File tree: 1 file changed (+5, -1)

vllm/engine/arg_utils.py

Lines changed: 5 additions & 1 deletion
@@ -1457,11 +1457,15 @@ def _set_default_args_v1(self, usage_context: UsageContext) -> None:
         from vllm.platforms import current_platform
         try:
             device_memory = current_platform.get_device_total_memory()
+            device_name = current_platform.get_device_name().lower()
         except Exception:
             # This is only used to set default_max_num_batched_tokens
             device_memory = 0
 
-        if device_memory >= 70 * GiB_bytes:
+        # NOTE(Kuntai): Setting large `max_num_batched_tokens` for A100 reduces
+        # throughput, see PR # ??? for more details.
+        # So here we do an extra device name check to prevent such regression.
+        if device_memory >= 70 * GiB_bytes and "a100" not in device_name:
             # For GPUs like H100 and MI300x, use larger default values.
             default_max_num_batched_tokens = {
                 UsageContext.LLM_CLASS: 16384,
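
For reference, the guard added here routes a device to the larger max_num_batched_tokens defaults only when it has at least 70 GiB of memory and does not identify as an A100. Below is a minimal, hand-written sketch of that condition (not vLLM source): GIB is a local stand-in for vLLM's GiB_bytes constant, and the device names in the usage lines are illustrative assumptions, not values from the commit.

# Sketch only: mirrors the gating condition introduced in the diff above.
# GIB is a local stand-in for vLLM's GiB_bytes (one GiB in bytes).
GIB = 1 << 30

def uses_large_defaults(device_memory: int, device_name: str) -> bool:
    # Larger defaults require >= 70 GiB of device memory AND a device
    # whose reported name does not contain "a100".
    return device_memory >= 70 * GIB and "a100" not in device_name.lower()

# Illustrative device names (assumed for this example):
print(uses_large_defaults(80 * GIB, "NVIDIA H100 80GB HBM3"))  # True: larger defaults (e.g. 16384 for LLM_CLASS)
print(uses_large_defaults(80 * GIB, "NVIDIA A100-SXM4-80GB"))  # False: keeps the smaller defaults after this commit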
