
Commit fa90d4f

checkin before format.sh
1 parent 1a2aef3 commit fa90d4f

3 files changed: +48 additions, -5 deletions


vllm/entrypoints/openai/protocol.py

Lines changed: 5 additions & 0 deletions
@@ -107,6 +107,11 @@ class UsageInfo(OpenAIBaseModel):
     completion_tokens: Optional[int] = 0


+class RequestResponseMetadata(BaseModel):
+    request_id: str
+    final_usage_info: Optional[UsageInfo] = None
+
+
 class JsonSchemaResponseFormat(OpenAIBaseModel):
     name: str
     description: Optional[str] = None
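
The new RequestResponseMetadata model is a plain pydantic BaseModel: it is created with only a request_id, and final_usage_info is filled in later once token counts are known. A minimal sketch of that lifecycle (the request id and token counts below are made up; the import path assumes this commit is applied):

```python
from vllm.entrypoints.openai.protocol import (RequestResponseMetadata,
                                              UsageInfo)

# Created up front with only the request id ...
metadata = RequestResponseMetadata(request_id="chat-123")

# ... and completed later, once prompt/completion token counts are known.
metadata.final_usage_info = UsageInfo(prompt_tokens=12,
                                      completion_tokens=34,
                                      total_tokens=46)

print(metadata.model_dump_json())  # pydantic v2 serialization of both fields
```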

vllm/entrypoints/openai/serving_chat.py

Lines changed: 23 additions & 3 deletions
@@ -22,7 +22,8 @@
     ChatCompletionRequest, ChatCompletionResponse,
     ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
     ChatCompletionStreamResponse, ChatMessage, DeltaFunctionCall, DeltaMessage,
-    DeltaToolCall, ErrorResponse, FunctionCall, ToolCall, UsageInfo)
+    DeltaToolCall, ErrorResponse, FunctionCall, ToolCall, UsageInfo,
+    RequestResponseMetadata)
 from vllm.entrypoints.openai.serving_engine import (BaseModelPath,
                                                     LoRAModulePath,
                                                     OpenAIServing,
@@ -175,6 +176,11 @@ async def create_chat_completion(
                 "--enable-auto-tool-choice and --tool-call-parser to be set")

         request_id = f"chat-{random_uuid()}"
+
+        request_metadata = RequestResponseMetadata(request_id=request_id)
+        if raw_request:
+            raw_request.state.request_metadata = request_metadata
+
         try:
             guided_decode_logits_processor = (
                 await self._guided_decode_logits_processor(request, tokenizer))
@@ -241,11 +247,13 @@ async def create_chat_completion(
         # Streaming response
         if request.stream:
             return self.chat_completion_stream_generator(
-                request, result_generator, request_id, conversation, tokenizer)
+                request, result_generator, request_id, conversation, tokenizer,
+                request_metadata)

         try:
             return await self.chat_completion_full_generator(
-                request, result_generator, request_id, conversation, tokenizer)
+                request, result_generator, request_id, conversation, tokenizer,
+                request_metadata)
         except ValueError as e:
             # TODO: Use a vllm-specific Validation Error
             return self.create_error_response(str(e))
@@ -262,6 +270,7 @@ async def chat_completion_stream_generator(
         request_id: str,
         conversation: List[ConversationMessage],
         tokenizer: AnyTokenizer,
+        request_metadata: RequestResponseMetadata,
     ) -> AsyncGenerator[str, None]:
         model_name = self.base_model_paths[0].name
         created_time = int(time.time())
@@ -580,6 +589,13 @@ async def chat_completion_stream_generator(
                         exclude_unset=True, exclude_none=True))
                     yield f"data: {final_usage_data}\n\n"

+            # report to FastAPI middleware aggregate number of completion tokens (across all choices)
+            num_completion_tokens = sum(previous_num_tokens)
+            request_metadata.final_usage_info = UsageInfo(
+                prompt_tokens=num_prompt_tokens,
+                completion_tokens=num_completion_tokens,
+                total_tokens=num_prompt_tokens+num_completion_tokens)
+
         except ValueError as e:
             # TODO: Use a vllm-specific Validation Error
             logger.error("error in chat completion stream generator: %s", e)
@@ -595,6 +611,7 @@ async def chat_completion_full_generator(
         request_id: str,
         conversation: List[ConversationMessage],
         tokenizer: AnyTokenizer,
+        request_metadata: RequestResponseMetadata,
     ) -> Union[ErrorResponse, ChatCompletionResponse]:

         model_name = self.base_model_paths[0].name
@@ -714,6 +731,9 @@ async def chat_completion_full_generator(
             completion_tokens=num_generated_tokens,
             total_tokens=num_prompt_tokens + num_generated_tokens,
         )
+
+        request_metadata.final_usage_info = usage
+
         response = ChatCompletionResponse(
             id=request_id,
             created=created_time,
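
Both handlers store the metadata object on raw_request.state before generation starts, which is what allows FastAPI middleware to read the final usage afterwards. A minimal sketch of such a consumer, not part of this commit: the app object, middleware name, and logging are illustrative, and it assumes a recent Starlette where request.state is shared through the ASGI scope.

```python
from fastapi import FastAPI, Request

app = FastAPI()  # stand-in for vLLM's OpenAI-compatible API server app


@app.middleware("http")
async def log_token_usage(request: Request, call_next):
    response = await call_next(request)

    # create_chat_completion / create_completion store a RequestResponseMetadata
    # instance here; final_usage_info is set once generation has finished.
    metadata = getattr(request.state, "request_metadata", None)
    if metadata is not None and metadata.final_usage_info is not None:
        usage = metadata.final_usage_info
        print(f"{metadata.request_id}: prompt_tokens={usage.prompt_tokens} "
              f"completion_tokens={usage.completion_tokens} "
              f"total_tokens={usage.total_tokens}")

    return response
```

For request.stream=True the usage is only written after the final SSE chunk has been produced, so a middleware like this sketch, which reads the state right after call_next, will in practice only see final_usage_info for non-streaming requests unless it also hooks completion of the response body.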

vllm/entrypoints/openai/serving_completion.py

Lines changed: 20 additions & 2 deletions
@@ -18,7 +18,8 @@
                                               CompletionResponseChoice,
                                               CompletionResponseStreamChoice,
                                               CompletionStreamResponse,
-                                              ErrorResponse, UsageInfo)
+                                              ErrorResponse, UsageInfo,
+                                              RequestResponseMetadata)
 # yapf: enable
 from vllm.entrypoints.openai.serving_engine import (BaseModelPath,
                                                     LoRAModulePath,
@@ -94,6 +95,10 @@ async def create_completion(
         request_id = f"cmpl-{random_uuid()}"
         created_time = int(time.time())

+        request_metadata = RequestResponseMetadata(request_id=request_id)
+        if raw_request:
+            raw_request.state.request_metadata = request_metadata
+
         # Schedule the request and get the result generator.
         generators: List[AsyncGenerator[RequestOutput, None]] = []
         try:
@@ -171,7 +176,8 @@ async def create_completion(
                 created_time,
                 model_name,
                 num_prompts=len(prompts),
-                tokenizer=tokenizer)
+                tokenizer=tokenizer,
+                request_metadata=request_metadata)

         # Non-streaming response
         final_res_batch: List[Optional[RequestOutput]] = [None] * len(prompts)
@@ -198,6 +204,7 @@ async def create_completion(
                 created_time,
                 model_name,
                 tokenizer,
+                request_metadata,
             )
         except asyncio.CancelledError:
             return self.create_error_response("Client disconnected")
@@ -227,6 +234,7 @@ async def completion_stream_generator(
         model_name: str,
         num_prompts: int,
         tokenizer: AnyTokenizer,
+        request_metadata: RequestResponseMetadata,
     ) -> AsyncGenerator[str, None]:
         num_choices = 1 if request.n is None else request.n
         previous_text_lens = [0] * num_choices * num_prompts
@@ -346,6 +354,13 @@ async def completion_stream_generator(
                         exclude_unset=False, exclude_none=True))
                     yield f"data: {final_usage_data}\n\n"

+            # report to FastAPI middleware aggregate tokens (all prompts, all completions)
+            total_prompt_tokens = sum(num_prompt_tokens)
+            total_completion_tokens = sum(previous_num_tokens)
+            request_metadata.final_usage_info = UsageInfo(prompt_tokens=total_prompt_tokens,
+                                                          completion_tokens=total_completion_tokens,
+                                                          total_tokens=total_prompt_tokens + total_completion_tokens)
+
         except ValueError as e:
             # TODO: Use a vllm-specific Validation Error
             data = self.create_streaming_error_response(str(e))
@@ -360,6 +375,7 @@ def request_output_to_completion_response(
         created_time: int,
         model_name: str,
         tokenizer: AnyTokenizer,
+        request_metadata: RequestResponseMetadata,
     ) -> CompletionResponse:
         choices: List[CompletionResponseChoice] = []
         num_prompt_tokens = 0
@@ -433,6 +449,8 @@ def request_output_to_completion_response(
             total_tokens=num_prompt_tokens + num_generated_tokens,
         )

+        request_metadata.final_usage_info = usage
+
         return CompletionResponse(
             id=request_id,
             created=created_time,
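
In the streaming completion path the usage is aggregated by hand: num_prompt_tokens appears to hold one count per prompt and previous_num_tokens one count per (prompt, choice) pair (mirroring previous_text_lens = [0] * num_choices * num_prompts in the context above), so summing each list yields the totals stored on the metadata. A tiny worked example with made-up counts for two prompts and n=2:

```python
from vllm.entrypoints.openai.protocol import UsageInfo

num_prompt_tokens = [5, 7]          # one entry per prompt (made up)
previous_num_tokens = [3, 4, 6, 2]  # one entry per (prompt, choice) pair (made up)

total_prompt_tokens = sum(num_prompt_tokens)        # 12
total_completion_tokens = sum(previous_num_tokens)  # 15

usage = UsageInfo(prompt_tokens=total_prompt_tokens,
                  completion_tokens=total_completion_tokens,
                  total_tokens=total_prompt_tokens + total_completion_tokens)
assert usage.total_tokens == 27
```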
