@@ -151,13 +151,18 @@ def get_mock_chat_completion_kwargs(async_open_ai: AsyncOpenAI) -> list[dict[str
151
151
152
152
153
153
def completion_message (
154
- message : ChatCompletionMessage , * , usage : CompletionUsage | None = None , logprobs : ChoiceLogprobs | None = None
154
+ message : ChatCompletionMessage ,
155
+ * ,
156
+ usage : CompletionUsage | None = None ,
157
+ logprobs : ChoiceLogprobs | None = None ,
158
+ response_id : str = '123' ,
159
+ finish_reason : str = 'stop'
155
160
) -> chat .ChatCompletion :
156
- choices = [Choice (finish_reason = 'stop' , index = 0 , message = message )]
161
+ choices = [Choice (finish_reason = finish_reason , index = 0 , message = message )]
157
162
if logprobs :
158
- choices = [Choice (finish_reason = 'stop' , index = 0 , message = message , logprobs = logprobs )]
163
+ choices = [Choice (finish_reason = finish_reason , index = 0 , message = message , logprobs = logprobs )]
159
164
return chat .ChatCompletion (
160
- id = '123' ,
165
+ id = response_id ,
161
166
choices = choices ,
162
167
created = 1704067200 , # 2024-01-01
163
168
model = 'gpt-4o-123' ,
@@ -192,13 +197,17 @@ async def test_request_simple_success(allow_model_requests: None):
192
197
model_name = 'gpt-4o-123' ,
193
198
timestamp = datetime (2024 , 1 , 1 , 0 , 0 , tzinfo = timezone .utc ),
194
199
provider_request_id = '123' ,
200
+ id = '123' ,
201
+ finish_reason = 'stop' ,
195
202
),
196
203
ModelRequest (parts = [UserPromptPart (content = 'hello' , timestamp = IsNow (tz = timezone .utc ))]),
197
204
ModelResponse (
198
205
parts = [TextPart (content = 'world' )],
199
206
model_name = 'gpt-4o-123' ,
200
207
timestamp = datetime (2024 , 1 , 1 , 0 , 0 , tzinfo = timezone .utc ),
201
208
provider_request_id = '123' ,
209
+ id = '123' ,
210
+ finish_reason = 'stop' ,
202
211
),
203
212
]
204
213
)
@@ -242,6 +251,36 @@ async def test_request_simple_usage(allow_model_requests: None):
242
251
)
243
252
244
253
254
async def test_id_and_finish_reason_fields(allow_model_requests: None):
    """Verify that `ModelResponse.id` and `.finish_reason` are populated from the completion.

    Each case pairs a finish_reason with a distinct response id so the two
    fields can be checked independently of one another.
    """
    cases = [
        ('stop', 'response-id-1'),
        ('length', 'response-id-2'),
        ('tool_calls', 'response-id-3'),
    ]

    for reason, rid in cases:
        completion = completion_message(
            ChatCompletionMessage(content='test response', role='assistant'),
            response_id=rid,
            finish_reason=reason,
        )
        mock_client = MockOpenAI.create_mock(completion)
        model = OpenAIModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client))
        agent = Agent(model)

        result = await agent.run('test')
        assert result.output == 'test response'

        # The run history is [request, response]; the second entry is the
        # model's response and must echo the mocked id/finish_reason.
        response = result.all_messages()[1]
        assert isinstance(response, ModelResponse)
        assert response.id == rid
        assert response.finish_reason == reason
282
+
283
+
245
284
async def test_request_structured_response (allow_model_requests : None ):
246
285
c = completion_message (
247
286
ChatCompletionMessage (
@@ -422,9 +461,9 @@ async def get_location(loc_name: str) -> str:
422
461
FinishReason = Literal ['stop' , 'length' , 'tool_calls' , 'content_filter' , 'function_call' ]
423
462
424
463
425
- def chunk (delta : list [ChoiceDelta ], finish_reason : FinishReason | None = None ) -> chat .ChatCompletionChunk :
464
+ def chunk (delta : list [ChoiceDelta ], finish_reason : FinishReason | None = None , chunk_id : str = 'x' ) -> chat .ChatCompletionChunk :
426
465
return chat .ChatCompletionChunk (
427
- id = 'x' ,
466
+ id = chunk_id ,
428
467
choices = [
429
468
ChunkChoice (index = index , delta = delta , finish_reason = finish_reason ) for index , delta in enumerate (delta )
430
469
],
@@ -435,8 +474,8 @@ def chunk(delta: list[ChoiceDelta], finish_reason: FinishReason | None = None) -
435
474
)
436
475
437
476
438
- def text_chunk (text : str , finish_reason : FinishReason | None = None ) -> chat .ChatCompletionChunk :
439
- return chunk ([ChoiceDelta (content = text , role = 'assistant' )], finish_reason = finish_reason )
477
def text_chunk(text: str, finish_reason: FinishReason | None = None, chunk_id: str = 'x') -> chat.ChatCompletionChunk:
    """Build a single-choice streaming chunk carrying *text* as the assistant delta."""
    delta = ChoiceDelta(content=text, role='assistant')
    return chunk([delta], finish_reason=finish_reason, chunk_id=chunk_id)
440
479
441
480
442
481
async def test_stream_text (allow_model_requests : None ):
@@ -552,6 +591,55 @@ async def test_stream_structured_finish_reason(allow_model_requests: None):
552
591
assert result .is_complete
553
592
554
593
594
async def test_stream_id_and_finish_reason_fields(allow_model_requests: None):
    """Test that streaming responses properly track id and finish_reason fields."""
    # --- streaming plain text -------------------------------------------------
    stream = [
        text_chunk('hello ', chunk_id='stream-response-123'),
        text_chunk('world', chunk_id='stream-response-123'),
        text_chunk('!', finish_reason='stop', chunk_id='stream-response-123'),
    ]
    mock_client = MockOpenAI.create_mock_stream(stream)
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client))
    agent = Agent(m)

    async with agent.run_stream('test') as result:
        assert not result.is_complete
        text_chunks = [c async for c in result.stream_text(debounce_by=None)]
        assert text_chunks == ['hello ', 'hello world', 'hello world!']
        assert result.is_complete

    # The run history is [request, response]; the second entry is the model response.
    messages = result.all_messages()
    model_response = messages[1]
    assert isinstance(model_response, ModelResponse)
    assert model_response.id == 'stream-response-123'
    assert model_response.finish_reason == 'stop'

    # --- streaming structured output with a different finish reason -----------
    # Every chunk, including the one that names the tool, must carry the SAME
    # id: struc_chunk() has no chunk_id parameter and defaults to id 'x', which
    # would make the id assertion below depend on whether the implementation
    # keeps the first or the last chunk's id. Build the first chunk explicitly
    # so the expected id is unambiguous.
    stream = [
        chunk(
            [
                ChoiceDelta(
                    tool_calls=[
                        ChoiceDeltaToolCall(
                            index=0, function=ChoiceDeltaToolCallFunction(name='final_result', arguments=None)
                        )
                    ]
                )
            ],
            chunk_id='struct-response-456',
        ),
        chunk(
            [
                ChoiceDelta(
                    tool_calls=[
                        ChoiceDeltaToolCall(
                            index=0, function=ChoiceDeltaToolCallFunction(name=None, arguments='{"first": "Test"')
                        )
                    ]
                )
            ],
            chunk_id='struct-response-456',
        ),
        chunk(
            [
                ChoiceDelta(
                    tool_calls=[
                        ChoiceDeltaToolCall(
                            index=0, function=ChoiceDeltaToolCallFunction(name=None, arguments='}')
                        )
                    ]
                )
            ],
            finish_reason='length',
            chunk_id='struct-response-456',
        ),
    ]
    mock_client = MockOpenAI.create_mock_stream(stream)
    m = OpenAIModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client))
    agent = Agent(m, output_type=MyTypedDict)

    async with agent.run_stream('test') as result:
        assert not result.is_complete
        structured_chunks = [dict(c) async for c in result.stream(debounce_by=None)]
        assert structured_chunks == [{'first': 'Test'}, {'first': 'Test'}]
        assert result.is_complete

    messages = result.all_messages()
    model_response = messages[1]
    assert isinstance(model_response, ModelResponse)
    assert model_response.id == 'struct-response-456'
    assert model_response.finish_reason == 'length'
641
+
642
+
555
643
async def test_stream_native_output (allow_model_requests : None ):
556
644
stream = [
557
645
chunk ([]),
0 commit comments