@@ -295,25 +295,49 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
                                           model_name: str):
     prompt = "What is the capital of France?"
 
-    # Test stream=True, stream_options={"include_usage": False}
-    stream = await client.completions.create(
-        model=model_name,
-        prompt=prompt,
-        max_tokens=5,
-        temperature=0.0,
-        stream=True,
-        stream_options={"include_usage": False})
+    # Test stream=True, stream_options=
+    #     {"include_usage": False, "continuous_usage_stats": False}
+    stream = await client.completions.create(model=model_name,
+                                              prompt=prompt,
+                                              max_tokens=5,
+                                              temperature=0.0,
+                                              stream=True,
+                                              stream_options={
+                                                  "include_usage": False,
+                                                  "continuous_usage_stats":
+                                                  False,
+                                              })
+
     async for chunk in stream:
         assert chunk.usage is None
 
-    # Test stream=True, stream_options={"include_usage": True}
-    stream = await client.completions.create(
-        model=model_name,
-        prompt=prompt,
-        max_tokens=5,
-        temperature=0.0,
-        stream=True,
-        stream_options={"include_usage": True})
+    # Test stream=True, stream_options=
+    #     {"include_usage": False, "continuous_usage_stats": True}
+    stream = await client.completions.create(model=model_name,
+                                              prompt=prompt,
+                                              max_tokens=5,
+                                              temperature=0.0,
+                                              stream=True,
+                                              stream_options={
+                                                  "include_usage": False,
+                                                  "continuous_usage_stats":
+                                                  True,
+                                              })
+    async for chunk in stream:
+        assert chunk.usage is None
+
+    # Test stream=True, stream_options=
+    #     {"include_usage": True, "continuous_usage_stats": False}
+    stream = await client.completions.create(model=model_name,
+                                              prompt=prompt,
+                                              max_tokens=5,
+                                              temperature=0.0,
+                                              stream=True,
+                                              stream_options={
+                                                  "include_usage": True,
+                                                  "continuous_usage_stats":
+                                                  False,
+                                              })
     async for chunk in stream:
         if chunk.choices[0].finish_reason is None:
             assert chunk.usage is None
@@ -328,7 +352,36 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
                 final_chunk.usage.completion_tokens)
             assert final_chunk.choices == []
 
-    # Test stream=False, stream_options={"include_usage": None}
+    # Test stream=True, stream_options=
+    #     {"include_usage": True, "continuous_usage_stats": True}
+    stream = await client.completions.create(model=model_name,
+                                              prompt=prompt,
+                                              max_tokens=5,
+                                              temperature=0.0,
+                                              stream=True,
+                                              stream_options={
+                                                  "include_usage": True,
+                                                  "continuous_usage_stats":
+                                                  True,
+                                              })
+    async for chunk in stream:
+        assert chunk.usage is not None
+        assert chunk.usage.prompt_tokens > 0
+        assert chunk.usage.completion_tokens > 0
+        assert chunk.usage.total_tokens == (chunk.usage.prompt_tokens +
+                                            chunk.usage.completion_tokens)
+        if chunk.choices[0].finish_reason is not None:
+            final_chunk = await stream.__anext__()
+            assert final_chunk.usage is not None
+            assert final_chunk.usage.prompt_tokens > 0
+            assert final_chunk.usage.completion_tokens > 0
+            assert final_chunk.usage.total_tokens == (
+                final_chunk.usage.prompt_tokens +
+                final_chunk.usage.completion_tokens)
+            assert final_chunk.choices == []
+
+    # Test stream=False, stream_options=
+    #     {"include_usage": None}
     with pytest.raises(BadRequestError):
         await client.completions.create(model=model_name,
                                          prompt=prompt,
@@ -337,7 +390,8 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
                                          stream=False,
                                          stream_options={"include_usage": None})
 
-    # Test stream=False, stream_options={"include_usage": True}
+    # Test stream=False, stream_options=
+    #     {"include_usage": True}
     with pytest.raises(BadRequestError):
         await client.completions.create(model=model_name,
                                          prompt=prompt,
@@ -346,6 +400,28 @@ async def test_completion_stream_options(client: openai.AsyncOpenAI,
                                          stream=False,
                                          stream_options={"include_usage": True})
 
+    # Test stream=False, stream_options=
+    #     {"continuous_usage_stats": None}
+    with pytest.raises(BadRequestError):
+        await client.completions.create(
+            model=model_name,
+            prompt=prompt,
+            max_tokens=5,
+            temperature=0.0,
+            stream=False,
+            stream_options={"continuous_usage_stats": None})
+
+    # Test stream=False, stream_options=
+    #     {"continuous_usage_stats": True}
+    with pytest.raises(BadRequestError):
+        await client.completions.create(
+            model=model_name,
+            prompt=prompt,
+            max_tokens=5,
+            temperature=0.0,
+            stream=False,
+            stream_options={"continuous_usage_stats": True})
+
 
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
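
For context, a minimal standalone sketch of what the new assertions exercise from the client side. The base URL, API key, and model name below are placeholders, and continuous_usage_stats is treated as a server-side extension that the OpenAI client simply forwards inside stream_options, exactly as the test above does:

import asyncio

import openai


async def main() -> None:
    # Placeholder endpoint and credentials; point these at a compatible server.
    client = openai.AsyncOpenAI(base_url="http://localhost:8000/v1",
                                api_key="EMPTY")
    stream = await client.completions.create(
        model="my-model",  # placeholder model name
        prompt="What is the capital of France?",
        max_tokens=5,
        temperature=0.0,
        stream=True,
        # With both flags enabled, every streamed chunk is expected to carry
        # usage counts, and a final choices-less chunk repeats the totals.
        stream_options={
            "include_usage": True,
            "continuous_usage_stats": True,
        })
    async for chunk in stream:
        if chunk.usage is not None:
            print(chunk.usage.prompt_tokens, chunk.usage.completion_tokens,
                  chunk.usage.total_tokens)


asyncio.run(main())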