@@ -3,7 +3,7 @@
 import json
 import time
 from collections.abc import AsyncIterator
-from typing import TYPE_CHECKING, Any, Literal, cast, overload
+from typing import TYPE_CHECKING, Any, Literal, overload
 
 from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream
 from openai.types import ChatModel
@@ -28,6 +28,7 @@
 from .chatcmpl_stream_handler import ChatCmplStreamHandler
 from .fake_id import FAKE_RESPONSES_ID
 from .interface import Model, ModelTracing
+from .openai_responses import Converter as OpenAIResponsesConverter
 
 if TYPE_CHECKING:
     from ..model_settings import ModelSettings
@@ -296,15 +297,27 @@ async def _fetch_response(
         if isinstance(ret, ChatCompletion):
             return ret
 
+        responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
+            model_settings.tool_choice
+        )
+        if responses_tool_choice is None or responses_tool_choice == NOT_GIVEN:
+            # For Responses API data compatibility with Chat Completions patterns,
+            # we need to fall back to "auto" if tool_choice is absent.
+            # Without this fix, you'll get the following error:
+            # pydantic_core._pydantic_core.ValidationError: 4 validation errors for Response
+            # tool_choice.literal['none','auto','required']
+            #   Input should be 'none', 'auto' or 'required'
+            #   [type=literal_error, input_value=NOT_GIVEN, input_type=NotGiven]
+            # see also: https://github.com/openai/openai-agents-python/issues/980
+            responses_tool_choice = "auto"
+
         response = Response(
             id=FAKE_RESPONSES_ID,
             created_at=time.time(),
             model=self.model,
             object="response",
             output=[],
-            tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
-            if tool_choice != NOT_GIVEN
-            else "auto",
+            tool_choice=responses_tool_choice,  # type: ignore[arg-type]
             top_p=model_settings.top_p,
             temperature=model_settings.temperature,
             tools=[],
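For reference, the fallback introduced above can be illustrated on its own. The sketch below is a minimal, standalone approximation: the helper name normalize_responses_tool_choice is hypothetical and not part of the agents library, and only the NOT_GIVEN sentinel exported by the openai package is assumed. It is not the library's actual Converter logic.

from typing import Any, Literal, Union

from openai import NOT_GIVEN


def normalize_responses_tool_choice(
    tool_choice: Any,
) -> Union[Literal["auto", "required", "none"], Any]:
    """Hypothetical helper mirroring the fallback above: return a value the
    Responses `Response` model will accept, using "auto" when the setting is
    absent."""
    if tool_choice is None or tool_choice == NOT_GIVEN:
        # Response.tool_choice only validates against 'none' | 'auto' | 'required'
        # (or a typed tool-choice object), so an absent value must be replaced
        # before pydantic validation runs.
        return "auto"
    return tool_choice


assert normalize_responses_tool_choice(NOT_GIVEN) == "auto"
assert normalize_responses_tool_choice(None) == "auto"
assert normalize_responses_tool_choice("required") == "required"

Falling back to "auto" keeps the synthesized Response object valid when no tool_choice was configured, which is what the ValidationError quoted in the diff comment would otherwise reject.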