Skip to content

Commit 6a5ab37

Browse files
authored
Revert "[Misc][Bugfix] Disable guided decoding for mistral tokenizer (#8521)"
This reverts commit ee2bcea.
1 parent 4c34ce8 commit 6a5ab37

File tree

1 file changed

+0
-23
lines changed

1 file changed

+0
-23
lines changed

vllm/model_executor/guided_decoding/__init__.py

Lines changed: 0 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,6 @@
66
from vllm.model_executor.guided_decoding.guided_fields import (
77
GuidedDecodingRequest)
88
from vllm.sampling_params import LogitsProcessor
9-
from vllm.transformers_utils.tokenizer import MistralTokenizer
109

1110

1211
async def get_guided_decoding_logits_processor(
@@ -16,23 +15,12 @@ async def get_guided_decoding_logits_processor(
1615
request = _adapt_request_for_tool_use(request)
1716

1817
if guided_decoding_backend == 'outlines':
19-
if isinstance(tokenizer, MistralTokenizer):
20-
raise NotImplementedError(
21-
"Guided decoding with 'outlines' is currently not supported "
22-
"for Mistral tokenizer. Please consider contributing to the "
23-
"'outlines' project if you are interested in this feature.")
2418
# NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193
2519
from vllm.model_executor.guided_decoding.outlines_decoding import ( # noqa
2620
get_outlines_guided_decoding_logits_processor)
2721
return await get_outlines_guided_decoding_logits_processor(
2822
request, tokenizer)
2923
if guided_decoding_backend == 'lm-format-enforcer':
30-
if isinstance(tokenizer, MistralTokenizer):
31-
raise NotImplementedError(
32-
"Guided decoding with 'lm-format-enforcer' is currently not "
33-
"supported for Mistral tokenizer. Please consider contributing "
34-
"to the 'lm-format-enforcer' project if you are interested "
35-
"in this feature.")
3624
from vllm.model_executor.guided_decoding.lm_format_enforcer_decoding import ( # noqa
3725
get_lm_format_enforcer_guided_decoding_logits_processor)
3826
return await get_lm_format_enforcer_guided_decoding_logits_processor(
@@ -49,23 +37,12 @@ def get_local_guided_decoding_logits_processor(
4937
# request = _adapt_request_for_tool_use(request)
5038

5139
if guided_decoding_backend == 'outlines':
52-
if isinstance(tokenizer, MistralTokenizer):
53-
raise NotImplementedError(
54-
"Guided decoding with 'outlines' is currently not supported "
55-
"for Mistral tokenizer. Please consider contributing to the "
56-
"'outlines' project if you are interested in this feature.")
5740
# NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193
5841
from vllm.model_executor.guided_decoding.outlines_decoding import ( # noqa
5942
get_local_outlines_guided_decoding_logits_processor)
6043
return get_local_outlines_guided_decoding_logits_processor(
6144
guided_options, tokenizer)
6245
if guided_decoding_backend == 'lm-format-enforcer':
63-
if isinstance(tokenizer, MistralTokenizer):
64-
raise NotImplementedError(
65-
"Guided decoding with 'lm-format-enforcer' is currently not "
66-
"supported for Mistral tokenizer. Please consider contributing "
67-
"to the 'lm-format-enforcer' project if you are interested "
68-
"in this feature.")
6946
from vllm.model_executor.guided_decoding.lm_format_enforcer_decoding import ( # noqa
7047
get_local_lm_format_enforcer_guided_decoding_logits_processor)
7148
return get_local_lm_format_enforcer_guided_decoding_logits_processor(

0 commit comments

Comments (0)