@@ -8,6 +8,7 @@
 from vllm.lora.request import LoRARequest
 from vllm.prompt_adapter.request import PromptAdapterRequest
 from vllm.transformers_utils.tokenizer_group import BaseTokenizerGroup
+from vllm.utils import print_warning_once
 
 from .data import (EncoderDecoderLLMInputs, LLMInputs, PromptInputs,
                    SingletonPromptInputs)
@@ -71,20 +72,21 @@ def get_decoder_start_token_id(self) -> Optional[int]:
         '''
 
         if not self.is_encoder_decoder_model():
-            logger.warning("Using None for decoder start token id because "
-                           "this is not an encoder/decoder model.")
+            print_warning_once("Using None for decoder start token id because "
+                               "this is not an encoder/decoder model.")
             return None
 
         if (self.model_config is None or self.model_config.hf_config is None):
-            logger.warning("Using None for decoder start token id because "
-                           "model config is not available.")
+            print_warning_once("Using None for decoder start token id because "
+                               "model config is not available.")
             return None
 
         dec_start_token_id = getattr(self.model_config.hf_config,
                                      'decoder_start_token_id', None)
         if dec_start_token_id is None:
-            logger.warning("Falling back on <BOS> for decoder start token id "
-                           "because decoder start token id is not available.")
+            print_warning_once("Falling back on <BOS> for decoder start token "
+                               "id because decoder start token id is not "
+                               "available.")
             dec_start_token_id = self.get_bos_token_id()
 
         return dec_start_token_id
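For context, the swapped-in helper suppresses duplicate log lines, so these warnings fire once per process rather than on every request. A minimal sketch of such a once-only warning helper, assuming deduplication is keyed on the message string (this may not match vLLM's exact implementation of print_warning_once):

import logging
from functools import lru_cache

logger = logging.getLogger(__name__)


@lru_cache(maxsize=None)
def print_warning_once(msg: str) -> None:
    # lru_cache memoizes on the message string, so each distinct
    # warning is emitted at most once; repeat calls are no-ops.
    logger.warning(msg)

Because get_decoder_start_token_id can run on every generation request, the memoized helper keeps the fallback warnings from flooding the log while still surfacing each condition once.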