From 94f28338f297ab6299ff691e912d35e1f6d4340b Mon Sep 17 00:00:00 2001
From: Stainless Bot
Date: Thu, 6 Jun 2024 18:55:33 +0000
Subject: [PATCH] feat(api): updates

---
 .stats.yml                                     |  2 +-
 .../resources/beta/threads/runs/runs.py        | 34 +++++++++++++++++++
 src/openai/resources/beta/threads/threads.py   | 34 +++++++++++++++++++
 src/openai/resources/chat/completions.py       | 34 +++++++++++++++++++
 .../beta/thread_create_and_run_params.py       |  7 ++++
 src/openai/types/beta/threads/run.py           |  7 ++++
 .../types/beta/threads/run_create_params.py    |  7 ++++
 .../types/chat/completion_create_params.py     |  7 ++++
 tests/api_resources/beta/test_threads.py       |  4 +++
 tests/api_resources/beta/threads/test_runs.py  |  4 +++
 tests/api_resources/chat/test_completions.py   |  4 +++
 11 files changed, 143 insertions(+), 1 deletion(-)

diff --git a/.stats.yml b/.stats.yml
index 11d2b0b181..eb81a249f1 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 64
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff436357b12348b7c1c930469332a79cd23ac6ec537e645c411893c42de42e57.yml
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index 5083e59d2b..e5dab3a5cd 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -98,6 +98,7 @@ def create(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -152,6 +153,10 @@ def create(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -246,6 +251,7 @@ def create(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -303,6 +309,10 @@ def create(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -393,6 +403,7 @@ def create(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -450,6 +461,10 @@ def create(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -539,6 +554,7 @@ def create(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -568,6 +584,7 @@ def create(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,
@@ -975,6 +992,7 @@ async def create(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1029,6 +1047,10 @@ async def create(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1123,6 +1145,7 @@ async def create(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1180,6 +1203,10 @@ async def create(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1270,6 +1297,7 @@ async def create(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1327,6 +1355,10 @@ async def create(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1416,6 +1448,7 @@ async def create(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1445,6 +1478,7 @@ async def create(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index 9637f37d0a..c3d0131146 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -282,6 +282,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -331,6 +332,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -429,6 +434,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -481,6 +487,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -575,6 +585,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -627,6 +638,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -720,6 +735,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -747,6 +763,7 @@ def create_and_run(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,
@@ -997,6 +1014,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1046,6 +1064,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1144,6 +1166,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1196,6 +1219,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1290,6 +1317,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1342,6 +1370,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1435,6 +1467,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1462,6 +1495,7 @@ async def create_and_run(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,
diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py
index aa25bc1858..ab35b03335 100644
--- a/src/openai/resources/chat/completions.py
+++ b/src/openai/resources/chat/completions.py
@@ -55,6 +55,7 @@ def create(
         logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -131,6 +132,10 @@ def create(
               you will be charged based on the number of generated tokens across all of the
               choices. Keep `n` as `1` to minimize costs.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens
               based on whether they appear in the text so far, increasing the model's
               likelihood to talk about new topics.
@@ -227,6 +232,7 @@ def create(
         logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -309,6 +315,10 @@ def create(
               you will be charged based on the number of generated tokens across all of the
               choices. Keep `n` as `1` to minimize costs.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens
               based on whether they appear in the text so far, increasing the model's
               likelihood to talk about new topics.
@@ -398,6 +408,7 @@ def create(
         logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -480,6 +491,10 @@ def create(
               you will be charged based on the number of generated tokens across all of the
               choices. Keep `n` as `1` to minimize costs.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens
               based on whether they appear in the text so far, increasing the model's
               likelihood to talk about new topics.
@@ -568,6 +583,7 @@ def create(
         logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -600,6 +616,7 @@ def create(
                     "logprobs": logprobs,
                     "max_tokens": max_tokens,
                     "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "presence_penalty": presence_penalty,
                     "response_format": response_format,
                     "seed": seed,
@@ -646,6 +663,7 @@ async def create(
         logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -722,6 +740,10 @@ async def create(
               you will be charged based on the number of generated tokens across all of the
               choices. Keep `n` as `1` to minimize costs.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens
               based on whether they appear in the text so far, increasing the model's
               likelihood to talk about new topics.
@@ -818,6 +840,7 @@ async def create(
         logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -900,6 +923,10 @@ async def create(
               you will be charged based on the number of generated tokens across all of the
               choices. Keep `n` as `1` to minimize costs.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens
               based on whether they appear in the text so far, increasing the model's
               likelihood to talk about new topics.
@@ -989,6 +1016,7 @@ async def create(
         logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1071,6 +1099,10 @@ async def create(
               you will be charged based on the number of generated tokens across all of the
               choices. Keep `n` as `1` to minimize costs.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens
               based on whether they appear in the text so far, increasing the model's
               likelihood to talk about new topics.
@@ -1159,6 +1191,7 @@ async def create(
         logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
         max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1191,6 +1224,7 @@ async def create(
                     "logprobs": logprobs,
                     "max_tokens": max_tokens,
                     "n": n,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "presence_penalty": presence_penalty,
                     "response_format": response_format,
                     "seed": seed,
diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py
index 436c2daddf..b8c69eb7ac 100644
--- a/src/openai/types/beta/thread_create_and_run_params.py
+++ b/src/openai/types/beta/thread_create_and_run_params.py
@@ -109,6 +109,13 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
     assistant will be used.
     """
 
+    parallel_tool_calls: bool
+    """
+    Whether to enable
+    [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+    during tool use.
+    """
+
     response_format: Optional[AssistantResponseFormatOptionParam]
     """Specifies the format that the model must output.
 
diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py
index 8244ffd598..ea84f1e97c 100644
--- a/src/openai/types/beta/threads/run.py
+++ b/src/openai/types/beta/threads/run.py
@@ -151,6 +151,13 @@ class Run(BaseModel):
     object: Literal["thread.run"]
     """The object type, which is always `thread.run`."""
 
+    parallel_tool_calls: bool
+    """
+    Whether to enable
+    [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+    during tool use.
+    """
+
     required_action: Optional[RequiredAction] = None
     """Details on the action required to continue the run.
 
diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py
index 90c9708596..a7aa799e00 100644
--- a/src/openai/types/beta/threads/run_create_params.py
+++ b/src/openai/types/beta/threads/run_create_params.py
@@ -107,6 +107,13 @@ class RunCreateParamsBase(TypedDict, total=False):
     assistant will be used.
     """
 
+    parallel_tool_calls: bool
+    """
+    Whether to enable
+    [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+    during tool use.
+    """
+
     response_format: Optional[AssistantResponseFormatOptionParam]
     """Specifies the format that the model must output.
 
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py
index a25f2fdd8f..47c2a5e24e 100644
--- a/src/openai/types/chat/completion_create_params.py
+++ b/src/openai/types/chat/completion_create_params.py
@@ -102,6 +102,13 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     of the choices. Keep `n` as `1` to minimize costs.
     """
 
+    parallel_tool_calls: bool
+    """
+    Whether to enable
+    [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+    during tool use.
+    """
+
     presence_penalty: Optional[float]
     """Number between -2.0 and 2.0.
 
diff --git a/tests/api_resources/beta/test_threads.py b/tests/api_resources/beta/test_threads.py
index 041562cb38..9e06b597ef 100644
--- a/tests/api_resources/beta/test_threads.py
+++ b/tests/api_resources/beta/test_threads.py
@@ -303,6 +303,7 @@ def test_method_create_and_run_with_all_params_overload_1(self, client: OpenAI)
             max_prompt_tokens=256,
             metadata={},
             model="gpt-4-turbo",
+            parallel_tool_calls=True,
             response_format="none",
             stream=False,
             temperature=1,
@@ -473,6 +474,7 @@ def test_method_create_and_run_with_all_params_overload_2(self, client: OpenAI)
             max_prompt_tokens=256,
             metadata={},
             model="gpt-4-turbo",
+            parallel_tool_calls=True,
             response_format="none",
             temperature=1,
             thread={
@@ -911,6 +913,7 @@ async def test_method_create_and_run_with_all_params_overload_1(self, async_clie
             max_prompt_tokens=256,
             metadata={},
             model="gpt-4-turbo",
+            parallel_tool_calls=True,
             response_format="none",
             stream=False,
             temperature=1,
@@ -1081,6 +1084,7 @@ async def test_method_create_and_run_with_all_params_overload_2(self, async_clie
             max_prompt_tokens=256,
             metadata={},
             model="gpt-4-turbo",
+            parallel_tool_calls=True,
             response_format="none",
             temperature=1,
             thread={
diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py
index bf4eba0689..ffadc1df88 100644
--- a/tests/api_resources/beta/threads/test_runs.py
+++ b/tests/api_resources/beta/threads/test_runs.py
@@ -134,6 +134,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             max_prompt_tokens=256,
             metadata={},
             model="gpt-4-turbo",
+            parallel_tool_calls=True,
             response_format="none",
             stream=False,
             temperature=1,
@@ -297,6 +298,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             max_prompt_tokens=256,
             metadata={},
             model="gpt-4-turbo",
+            parallel_tool_calls=True,
             response_format="none",
             temperature=1,
             tool_choice="none",
@@ -798,6 +800,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
             max_prompt_tokens=256,
             metadata={},
             model="gpt-4-turbo",
+            parallel_tool_calls=True,
             response_format="none",
             stream=False,
             temperature=1,
@@ -961,6 +964,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             max_prompt_tokens=256,
             metadata={},
             model="gpt-4-turbo",
+            parallel_tool_calls=True,
             response_format="none",
             temperature=1,
             tool_choice="none",
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index 1c195c4001..3099e16815 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -56,6 +56,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
             logprobs=True,
             max_tokens=0,
             n=1,
+            parallel_tool_calls=True,
             presence_penalty=-2,
             response_format={"type": "json_object"},
             seed=-9223372036854776000,
@@ -171,6 +172,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
             logprobs=True,
             max_tokens=0,
             n=1,
+            parallel_tool_calls=True,
             presence_penalty=-2,
             response_format={"type": "json_object"},
             seed=-9223372036854776000,
@@ -288,6 +290,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
             logprobs=True,
             max_tokens=0,
             n=1,
+            parallel_tool_calls=True,
             presence_penalty=-2,
             response_format={"type": "json_object"},
             seed=-9223372036854776000,
@@ -403,6 +406,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
             logprobs=True,
             max_tokens=0,
             n=1,
+            parallel_tool_calls=True,
             presence_penalty=-2,
             response_format={"type": "json_object"},
             seed=-9223372036854776000,
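
As the diffs above show, the new flag is passed straight through to the request body, so callers can supply it like any other keyword argument. Below is a minimal usage sketch, not part of the patch: the client setup, model name, and `get_weather` tool schema are illustrative assumptions, only the `parallel_tool_calls` keyword itself comes from this change.

    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

    # A hypothetical tool definition, for illustration only.
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get the current weather for a city.",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ]

    completion = client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[{"role": "user", "content": "What is the weather in Paris and Tokyo?"}],
        tools=tools,
        # The flag added by this patch: False forces at most one tool call per
        # model turn; omitting it (NOT_GIVEN) keeps the server-side default.
        parallel_tool_calls=False,
    )
    print(completion.choices[0].message.tool_calls)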