Skip to content

Commit daae930

Browse files
schoennenbeck authored and gshtras committed
[Frontend] Fix chat template content format detection (vllm-project#18190)
Signed-off-by: Sebastian Schönnenbeck <sebastian.schoennenbeck@comma-soft.com>
Signed-off-by: Gregory Shtrasberg <Gregory.Shtrasberg@amd.com>
1 parent 18e107a commit daae930

File tree

1 file changed

+3
-4
lines changed

1 file changed

+3
-4
lines changed

vllm/entrypoints/chat_utils.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -387,7 +387,6 @@ def resolve_hf_chat_template(
387387
def _resolve_chat_template_content_format(
388388
chat_template: Optional[str],
389389
tools: Optional[list[dict[str, Any]]],
390-
given_format: ChatTemplateContentFormatOption,
391390
tokenizer: AnyTokenizer,
392391
*,
393392
model_config: ModelConfig,
@@ -408,7 +407,7 @@ def _resolve_chat_template_content_format(
408407
detected_format = ("string" if jinja_text is None else
409408
_detect_content_format(jinja_text, default="string"))
410409

411-
return detected_format if given_format == "auto" else given_format
410+
return detected_format
412411

413412

414413
@lru_cache
@@ -451,7 +450,6 @@ def resolve_chat_template_content_format(
451450
detected_format = _resolve_chat_template_content_format(
452451
chat_template,
453452
tools,
454-
given_format,
455453
tokenizer,
456454
model_config=model_config,
457455
)
@@ -462,7 +460,8 @@ def resolve_chat_template_content_format(
462460
detected_format=detected_format,
463461
)
464462

465-
return detected_format
463+
return detected_format if given_format == "auto" else given_format
464+
466465

467466

468467
ModalityStr = Literal["image", "audio", "video", "image_embeds"]

0 commit comments

Comments (0)