
Commit 701b0fd

[Enhancement] Add padding for ACL Graph (vllm-project#803)
### What this PR does / why we need it?

Add padding for ACL Graph and refactor graph batch size adjustments to utils.py.

---------

Signed-off-by: Yizhou Liu <liu_yizhou@outlook.com>
1 parent efabd72 commit 701b0fd

File tree

4 files changed: +97 -79 lines changed


vllm_ascend/attention/attention_v1.py

Lines changed: 10 additions & 8 deletions
@@ -104,6 +104,7 @@ class AscendAttentionState(Enum):
 
 @dataclass
 class AscendMetadata:
+    num_actual_tokens: int  # Number of tokens excluding padding.
     # (batch_size, max_blocks_per_seq).
     # Block addresses per sequence. (Seq id -> list of physical block)
     block_tables: torch.Tensor
@@ -125,7 +126,6 @@ class AscendMetadata:
     is_only_prefill: bool = False
     # Current state of this attention run.
     attn_state: AscendAttentionState = AscendAttentionState.ChunkedPrefill
-
     attn_mask: Optional[torch.Tensor] = None
 
 
@@ -149,7 +149,8 @@ def build(self, num_reqs, num_actual_tokens, max_query_len,
         attn_mask = self.runner.attn_mask
         attn_state = self.runner.attn_state
 
-        attn_metadata = AscendMetadata(block_tables=block_table,
+        attn_metadata = AscendMetadata(num_actual_tokens=num_actual_tokens,
+                                       block_tables=block_table,
                                        query_lens=query_lens,
                                        seq_lens=seq_lens,
                                        max_query_len=max_query_len,
@@ -234,9 +235,9 @@ def forward(
                 output=output,
                 layer_name=layer.layer_name)
         else:
-            num_tokens = query.shape[0]
             if attn_metadata is None:
                 return output.view(num_tokens, self.hidden_size)
+            num_actual_tokens = attn_metadata.num_actual_tokens
             assert layer._k_scale_float == 1.0 and layer._v_scale_float == 1.0
             attn_type = self.attn_type
             if attn_type != AttentionType.DECODER:
@@ -255,11 +256,12 @@ def forward(
                 if self.key_cache is None:
                     self.key_cache, self.value_cache = kv_cache[0], kv_cache[1]
                 slots = attn_metadata.slot_mapping
-                torch_npu._npu_reshape_and_cache(key=key,
-                                                 value=value,
-                                                 key_cache=self.key_cache,
-                                                 value_cache=self.value_cache,
-                                                 slot_indices=slots)
+                torch_npu._npu_reshape_and_cache(
+                    key=key[:num_actual_tokens],
+                    value=value[:num_actual_tokens],
+                    key_cache=self.key_cache,
+                    value_cache=self.value_cache,
+                    slot_indices=slots)
 
             if hasattr(layer, 'quant_method'):
                 # TODO: Add attr (num_prefills, prefill_metadata, decode_metadata) to AscendMetadata
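
Illustration (not part of the commit): a minimal standalone sketch of why key and value are now sliced to num_actual_tokens before the cache write. Once the runner pads the token dimension up to a captured graph size, only the unpadded prefix has valid slot mappings, so only those rows belong in the KV cache. All sizes below are hypothetical.

import torch

# Hypothetical sizes: 17 real tokens padded up to a captured batch size of 24.
num_input_tokens = 24    # padded token count seen by the attention layer
num_actual_tokens = 17   # real tokens, as recorded in AscendMetadata
num_heads, head_dim = 8, 128

key = torch.randn(num_input_tokens, num_heads, head_dim)
value = torch.randn(num_input_tokens, num_heads, head_dim)

# Mirror key[:num_actual_tokens] / value[:num_actual_tokens] from the diff above:
# only the valid prefix is written to the cache, the padded tail is dropped.
key_to_cache = key[:num_actual_tokens]
value_to_cache = value[:num_actual_tokens]
assert key_to_cache.shape[0] == value_to_cache.shape[0] == num_actual_tokens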

vllm_ascend/platform.py

Lines changed: 3 additions & 0 deletions
@@ -25,6 +25,8 @@
 from vllm.platforms import Platform, PlatformEnum
 from vllm.utils import supports_dynamo
 
+from vllm_ascend.utils import update_aclgraph_sizes
+
 CUSTOM_OP_ENABLED = False
 try:
     # register custom ops into torch_library here
@@ -144,6 +146,7 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
             compilation_config.use_inductor = False
             compilation_config.splitting_ops.extend(
                 ["vllm.unified_ascend_attention_with_output"])
+            update_aclgraph_sizes(vllm_config)
 
         if vllm_config.additional_config is not None:
             enable_graph_mode = vllm_config.additional_config.get(

vllm_ascend/utils.py

Lines changed: 68 additions & 0 deletions
@@ -16,12 +16,28 @@
 # This file is a part of the vllm-ascend project.
 # Adapted from vllm-project/vllm/vllm/worker/worker.py
 #
+
+import math
+from typing import TYPE_CHECKING
+
 import torch
 from packaging.version import InvalidVersion, Version
 from vllm.logger import logger
 
 import vllm_ascend.envs as envs
 
+if TYPE_CHECKING:
+    from vllm.config import VllmConfig
+else:
+    VllmConfig = None
+
+# NOTE: Currently, we can only capture 1920 graphs at most,
+# due to the limitation of ACL graph. This number is bounded by
+# the number of streams, which is 2048, we save 128 streams
+# as a buffer.
+# Maximum number of graphs that can be captured by ACL Graph
+MAX_CAPTURE_SIZE = 1920
+
 
 def try_register_lib(lib_name: str, lib_info: str = ""):
     import importlib
@@ -99,3 +115,55 @@ def vllm_version_is(target_vllm_version: str):
         "is installed probably. Set the environment variable VLLM_VERSION "
         "to control it by hand. And please make sure the vaule follows the "
         "format of x.y.z.")
+
+
+def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
+    """Update ACL graph capture sizes based on hardware limitations"""
+    # Store original configuration and temporarily clear it
+    compilation_config = vllm_config.compilation_config
+    original_sizes, compilation_config.cudagraph_capture_sizes = \
+        compilation_config.cudagraph_capture_sizes, None
+
+    # Calculate parallel configuration factor (increases with DP or TP)
+    # TODO(Yizhou): This is a temporary solution, need to be improved
+    # in the future, taking into account the other parallel configurations.
+    num_hidden_layers = vllm_config.model_config.hf_config.num_hidden_layers
+    parallel_config = vllm_config.parallel_config
+    parallel_factor = 1 + sum(size > 1 for size in [
+        parallel_config.data_parallel_size,
+        parallel_config.tensor_parallel_size
+    ])
+
+    # Calculate maximum supported batch sizes considering model architecture
+    max_num_batch_sizes = math.floor(MAX_CAPTURE_SIZE /
+                                     (num_hidden_layers + 1) / parallel_factor)
+    logger.info("Calculated maximum supported batch sizes for ACL graph: %s",
+                max_num_batch_sizes)
+
+    # If original sizes exceed maximum, sample a representative subset
+    if max_num_batch_sizes < len(original_sizes):
+        # Sample uniformly from original sizes
+        step = (len(original_sizes) - 1) / (max_num_batch_sizes - 1)
+        indices = [round(i * step) for i in range(max_num_batch_sizes)]
+
+        # Ensure first and last elements are preserved
+        indices[0], indices[-1] = 0, len(original_sizes) - 1
+
+        sampled_sizes = [original_sizes[i] for i in indices]
+        compilation_config.init_with_cudagraph_sizes(sampled_sizes)
+
+        logger.info(
+            "Adjusted ACL graph batch sizes for %s model (layers: %d): %d → %d sizes",
+            vllm_config.model_config.architectures[0],
+            num_hidden_layers,
+            len(original_sizes),
+            len(compilation_config.
+                cudagraph_capture_sizes  # type: ignore[arg-type]
+                ))
+    else:
+        # No adjustment needed
+        compilation_config.cudagraph_capture_sizes = original_sizes
+        logger.info(
+            "No adjustment needed for ACL graph batch sizes: %s model (layers: %d) with %d sizes",
+            vllm_config.model_config.architectures[0], num_hidden_layers,
+            len(original_sizes))
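
Illustration (not part of the commit): a standalone sketch of the capping and uniform-sampling behaviour of update_aclgraph_sizes, runnable without a VllmConfig. The candidate size list, layer count, and parallel sizes below are made up. The (num_hidden_layers + 1) divisor reflects that piecewise compilation splits the model at each attention op, so every captured batch size costs roughly one graph per layer out of the 1920-graph budget.

import math

MAX_CAPTURE_SIZE = 1920

def sample_capture_sizes(original_sizes, num_hidden_layers, dp_size, tp_size):
    # Same arithmetic as update_aclgraph_sizes above, minus the vLLM config plumbing.
    parallel_factor = 1 + sum(size > 1 for size in (dp_size, tp_size))
    max_num_batch_sizes = math.floor(
        MAX_CAPTURE_SIZE / (num_hidden_layers + 1) / parallel_factor)
    if max_num_batch_sizes >= len(original_sizes):
        return list(original_sizes)
    # Uniformly sample the allowed number of sizes, keeping the first and last.
    step = (len(original_sizes) - 1) / (max_num_batch_sizes - 1)
    indices = [round(i * step) for i in range(max_num_batch_sizes)]
    indices[0], indices[-1] = 0, len(original_sizes) - 1
    return [original_sizes[i] for i in indices]

# E.g. 28 hidden layers with TP=2: floor(1920 / 29 / 2) = 33 capture sizes at most,
# so a 64-entry candidate list is thinned to 33 entries.
candidates = list(range(8, 520, 8))
print(len(sample_capture_sizes(candidates, num_hidden_layers=28, dp_size=1, tp_size=2)))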

vllm_ascend/worker/model_runner_v1.py

Lines changed: 16 additions & 71 deletions
@@ -18,7 +18,6 @@
 #
 
 import gc
-import math
 import os
 import time
 import weakref
@@ -293,9 +292,9 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device):
             device="cpu")
         self.attn_mask = None
         self.attn_state = None
-        self.use_npu_graph = (self.vllm_config.compilation_config.level
-                              == CompilationLevel.PIECEWISE
-                              and not self.model_config.enforce_eager)
+        self.use_aclgraph = (self.vllm_config.compilation_config.level
+                             == CompilationLevel.PIECEWISE
+                             and not self.model_config.enforce_eager)
         self.aclgraph_batch_sizes = list(
             reversed(
                 self.vllm_config.compilation_config.cudagraph_capture_sizes))
@@ -508,6 +507,13 @@ def _process_reqs(
         assert total_num_scheduled_tokens > 0
         num_reqs = self.input_batch.num_reqs
         assert num_reqs > 0
+        if (self.use_aclgraph and
+                total_num_scheduled_tokens <= self.aclgraph_batch_sizes[-1]):
+            # Add padding to the batch size.
+            num_input_tokens = self.vllm_config.pad_for_cudagraph(
+                total_num_scheduled_tokens)
+        else:
+            num_input_tokens = total_num_scheduled_tokens
 
         modified_batch = self.attn_metadata_builder.reorder_batch(
             self.input_batch, scheduler_output)
@@ -546,7 +552,7 @@ def _process_reqs(
 
         self.positions[:total_num_scheduled_tokens].copy_(
             self.positions_cpu[:total_num_scheduled_tokens], non_blocking=True)
-        positions = self.positions[:total_num_scheduled_tokens]
+        positions = self.positions[:num_input_tokens]
         self.query_lens = torch.from_numpy(num_scheduled_tokens)
 
         self.seq_lens_np[:num_reqs] = (
@@ -605,7 +611,7 @@ def _process_reqs(
         # Copy the tensors to the NPU.
         self.input_ids[:total_num_scheduled_tokens].copy_(
             self.input_ids_cpu[:total_num_scheduled_tokens], non_blocking=True)
-        input_ids = self.input_ids[:total_num_scheduled_tokens]
+        input_ids = self.input_ids[:num_input_tokens]
 
         if self.enable_torchair_graph_mode and attn_metadata.attn_state == AscendAttentionState.DecodeOnly:
             padding = torch.zeros(graph_pad_size,
@@ -615,7 +621,9 @@ def _process_reqs(
             positions = torch.cat([positions, padding])
 
         # Run forward pass
-        with set_forward_context(attn_metadata, self.vllm_config):
+        with set_forward_context(attn_metadata,
+                                 self.vllm_config,
+                                 num_tokens=num_input_tokens):
             model_kwargs = {}
             if self.enable_torchair_graph_mode:
                 model_kwargs["kv_caches"] = self.kv_caches
@@ -1062,17 +1070,14 @@ def get_kv_cache_spec(self) -> dict[str, KVCacheSpec]:
         return kv_cache_spec
 
     def capture_model(self) -> None:
-        if not self.use_npu_graph:
+        if not self.use_aclgraph:
             logger.warning(
                 "Skipping NPU graph capture. Please add "
                 "-O %s to use NPU graphs.", CompilationLevel.PIECEWISE)
             return
 
         start_time = time.perf_counter()
         start_free_npu_memory = torch.npu.mem_get_info()[0]
-        # Since vllm aclgraph_batch_sizes is too large,
-        # we need to adjust its length to proper size.
-        self.verify_adjust_aclgraph_batch_sizes()
 
         # Trigger ACL graph capture for specific shapes.
         # Capture the large shapes first so that the smaller shapes
@@ -1091,63 +1096,3 @@ def capture_model(self) -> None:
         # This usually takes 5~20 seconds.
         logger.info("Graph capturing finished in %.0f secs, took %.2f GiB",
                     elapsed_time, npu_graph_size / (1 << 30))
-
-    def verify_adjust_aclgraph_batch_sizes(self) -> None:
-        # Now, vllm-ascend support max capture size is 1920
-        max_capture_size = 1920
-        original_aclgraph_batch_sizes = self.aclgraph_batch_sizes
-        num_hidden_layers = self.vllm_config.model_config.hf_config.num_hidden_layers
-        max_support_len_aclgraph = self.get_max_support_len(
-            max_capture_size, num_hidden_layers)
-
-        if max_support_len_aclgraph < len(original_aclgraph_batch_sizes):
-            self.aclgraph_batch_sizes = self.sample_from_list(
-                max_support_len_aclgraph)
-
-            logger.info(
-                "Model:%s-num_hidden_layers:%d will adjust aclgraph_batch_sizes, pre-adjust-len: %s, post-adjust-len: %s",
-                self.vllm_config.model_config.architectures[0],
-                num_hidden_layers, len(original_aclgraph_batch_sizes),
-                len(self.aclgraph_batch_sizes))
-        else:
-            logger.info(
-                "Model:%s-num_hidden_layers:%d no need adjust aclgraph_batch_sizes, list_len: %s",
-                self.vllm_config.model_config.architectures[0],
-                num_hidden_layers, len(original_aclgraph_batch_sizes))
-
-    def get_max_support_len(self, max_capture_size, num_hidden_layers) -> int:
-        parallel_type_cnt = 0
-        dp_size = self.vllm_config.parallel_config.data_parallel_size
-        tp_size = self.vllm_config.parallel_config.tensor_parallel_size
-        if dp_size > 1:
-            parallel_type_cnt += 1
-        if tp_size > 1:
-            parallel_type_cnt += 1
-        max_support_len_aclgraph = math.floor(max_capture_size /
-                                              (num_hidden_layers + 1) /
-                                              (parallel_type_cnt + 1))
-        logger.info(
-            "max_capture_size:%s, dp_size:%s, tp_size:%s, parallel_type_cnt:%s, max_support_len_aclgraph: %s:",
-            max_capture_size,
-            dp_size,
-            tp_size,
-            parallel_type_cnt,
-            max_support_len_aclgraph,
-        )
-
-        return max_support_len_aclgraph
-
-    def sample_from_list(self, sample_len) -> list[int]:
-        # we use this function to sample a new list from old list by given length, and maintain uniformity, for example:
-        # original: [1 8 16 24 32 40 48 56 64]
-        # --> sample length = 3: [1 32 64]
-        # --> sample length = 5: [1 16 32 48 64]
-        original_len = len(self.aclgraph_batch_sizes)
-        step = (original_len - 1) / (sample_len - 1)
-        indices = [round(i * step) for i in range(sample_len)]
-        # Align first and last element of the original list and sub-list
-        indices[0] = 0
-        indices[-1] = original_len - 1
-        # Sample new list
-        new_list = [self.aclgraph_batch_sizes[i] for i in indices]
-        return new_list
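
Illustration (not part of the commit): a standalone sketch of the padding decision added to _process_reqs, assuming the captured batch sizes are available in ascending order. vllm_config.pad_for_cudagraph rounds the token count up to the nearest captured size; the helper below reproduces that decision with bisect.

import bisect

def pad_num_tokens(total_num_scheduled_tokens: int, capture_sizes: list[int],
                   use_aclgraph: bool) -> int:
    # capture_sizes: ascending batch sizes for which ACL graphs were captured.
    if use_aclgraph and total_num_scheduled_tokens <= capture_sizes[-1]:
        # Round up to the smallest captured size that fits.
        idx = bisect.bisect_left(capture_sizes, total_num_scheduled_tokens)
        return capture_sizes[idx]
    # Larger than any captured graph (or graphs disabled): run unpadded.
    return total_num_scheduled_tokens

capture_sizes = [1, 2, 4, 8, 16, 32, 64]
assert pad_num_tokens(17, capture_sizes, use_aclgraph=True) == 32
assert pad_num_tokens(100, capture_sizes, use_aclgraph=True) == 100

Padding the live token count to one of the captured sizes is what lets each step replay a pre-captured ACL graph instead of falling back to eager execution; the extra rows are ignored downstream via num_actual_tokens.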
