Commit ca0c255

Rename from_counts to from_token_counts
1 parent e33211c commit ca0c255

File tree

11 files changed, +11 -11 lines changed

vllm/inputs/registry.py

Lines changed: 1 addition & 1 deletion
@@ -125,7 +125,7 @@ def _default_dummy_data_factory(
         # Avoid circular import
         from vllm.sequence import SequenceData
 
-        dummy_seq_data = SequenceData.from_counts((0, seq_len))
+        dummy_seq_data = SequenceData.from_token_counts((0, seq_len))
         dummy_multi_modal_data = None
 
         return dummy_seq_data, dummy_multi_modal_data

vllm/model_executor/models/blip.py

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ def dummy_seq_data_for_blip(
     else:
         image_feature_size = image_feature_size_override
 
-    return SequenceData.from_counts(
+    return SequenceData.from_token_counts(
         (image_token_id, image_feature_size * num_images),
         (0, seq_len - image_feature_size * num_images),
     )

vllm/model_executor/models/blip2.py

Lines changed: 1 addition & 1 deletion
@@ -427,7 +427,7 @@ def dummy_seq_data_for_blip2(
     else:
         image_feature_size = image_feature_size_override
 
-    return SequenceData.from_counts(
+    return SequenceData.from_token_counts(
         (image_token_id, image_feature_size * num_images),
         (0, seq_len - image_feature_size * num_images),
     )

vllm/model_executor/models/chameleon.py

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ def dummy_seq_data_for_chameleon(
     else:
         image_feature_size = image_feature_size_override
 
-    return SequenceData.from_counts(
+    return SequenceData.from_token_counts(
         (image_token_id, image_feature_size * num_images),
         (0, seq_len - image_feature_size * num_images),
     )

vllm/model_executor/models/clip.py

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ def dummy_seq_data_for_clip(
     else:
         image_feature_size = image_feature_size_override
 
-    return SequenceData.from_counts(
+    return SequenceData.from_token_counts(
         (image_token_id, image_feature_size * num_images),
         (0, seq_len - image_feature_size * num_images),
     )

vllm/model_executor/models/minicpmv.py

Lines changed: 1 addition & 1 deletion
@@ -257,7 +257,7 @@ def get_max_minicpmv_image_tokens(ctx: InputContext):
 
 
 def dummy_seq_data_for_minicpmv(seq_len: int, num_images: int):
-    return SequenceData.from_counts((0, seq_len))
+    return SequenceData.from_token_counts((0, seq_len))
 
 
 def dummy_image_for_minicpmv(hf_config: PretrainedConfig, num_images: int):

vllm/model_executor/models/pixtral.py

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ def dummy_data_for_pixtral(ctx: InputContext, seq_len: int,
     image_feature_size = (size**2) // (patch_size**2)
 
     num_image_tokens = image_feature_size * num_images
-    seq_data = SequenceData.from_counts(
+    seq_data = SequenceData.from_token_counts(
         (image_token_id, num_image_tokens),
         (0, seq_len - num_image_tokens),
     )

vllm/model_executor/models/qwen.py

Lines changed: 1 addition & 1 deletion
@@ -817,7 +817,7 @@ def dummy_data_for_qwen(
     # The presence of a visual config indicates this is a multimodal model.
     # If we don't have it, the model is considered an LLM for warmup purposes.
     if not hasattr(hf_config, "visual"):
-        seq_data = SequenceData.from_counts((0, seq_len))
+        seq_data = SequenceData.from_token_counts((0, seq_len))
         mm_data = None
         return seq_data, mm_data
 

vllm/model_executor/models/qwen2_vl.py

Lines changed: 1 addition & 1 deletion
@@ -680,7 +680,7 @@ def dummy_data_for_qwen2_vl(
 
     hf_config = ctx.get_hf_config(Qwen2VLConfig)
 
-    dummy_seqdata = SequenceData.from_counts(
+    dummy_seqdata = SequenceData.from_token_counts(
         (hf_config.vision_start_token_id, 1),
         (hf_config.image_token_id, max_llm_image_tokens),
         (hf_config.vision_end_token_id, 1),

vllm/model_executor/models/siglip.py

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ def dummy_seq_data_for_siglip(
     else:
         image_feature_size = image_feature_size_override
 
-    return SequenceData.from_counts(
+    return SequenceData.from_token_counts(
         (image_token_id, image_feature_size * num_images),
         (0, seq_len - image_feature_size * num_images),
     )

vllm/sequence.py

Lines changed: 1 addition & 1 deletion
@@ -171,7 +171,7 @@ class SequenceData(msgspec.Struct,
     _mrope_position_delta: Optional[int] = None
 
     @staticmethod
-    def from_counts(*token_counts: Tuple[int, int]) -> "SequenceData":
+    def from_token_counts(*token_counts: Tuple[int, int]) -> "SequenceData":
        if len(token_counts) == 0:
            return SequenceData.from_seqs([])
 
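
For reference, the renamed constructor takes (token_id, count) pairs, exactly as the call sites above use it. Below is a minimal sketch of the intended behavior, assuming each pair expands into that many repetitions of the token id; the hunk above only shows the signature and the empty-input case, so the helper name expand_token_counts and the example values are illustrative, not vLLM's actual implementation.

from typing import List, Tuple

def expand_token_counts(*token_counts: Tuple[int, int]) -> List[int]:
    # Hypothetical stand-in for SequenceData.from_token_counts: flatten each
    # (token_id, count) pair into `count` repetitions of `token_id`.
    token_ids: List[int] = []
    for token_id, count in token_counts:
        token_ids.extend([token_id] * count)
    return token_ids

# Mirrors the multimodal call sites above: image placeholder tokens first,
# then padding with token id 0 up to the dummy sequence length.
image_token_id, image_feature_size, num_images, seq_len = 32000, 576, 1, 2048
dummy_token_ids = expand_token_counts(
    (image_token_id, image_feature_size * num_images),
    (0, seq_len - image_feature_size * num_images),
)
assert len(dummy_token_ids) == seq_len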