Skip to content

Commit 3cde087

Browse files
feat(api): updates (#1474)
1 parent 2951f87 commit 3cde087

File tree

11 files changed

+143
-1
lines changed

11 files changed

+143
-1
lines changed

.stats.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 64
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-ff436357b12348b7c1c930469332a79cd23ac6ec537e645c411893c42de42e57.yml

src/openai/resources/beta/threads/runs/runs.py

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,7 @@ def create(
9898
None,
9999
]
100100
| NotGiven = NOT_GIVEN,
101+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
101102
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
102103
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
103104
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -152,6 +153,10 @@ def create(
152153
model associated with the assistant. If not, the model associated with the
153154
assistant will be used.
154155
156+
parallel_tool_calls: Whether to enable
157+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
158+
during tool use.
159+
155160
response_format: Specifies the format that the model must output. Compatible with
156161
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
157162
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -246,6 +251,7 @@ def create(
246251
None,
247252
]
248253
| NotGiven = NOT_GIVEN,
254+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
249255
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
250256
temperature: Optional[float] | NotGiven = NOT_GIVEN,
251257
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -303,6 +309,10 @@ def create(
303309
model associated with the assistant. If not, the model associated with the
304310
assistant will be used.
305311
312+
parallel_tool_calls: Whether to enable
313+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
314+
during tool use.
315+
306316
response_format: Specifies the format that the model must output. Compatible with
307317
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
308318
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -393,6 +403,7 @@ def create(
393403
None,
394404
]
395405
| NotGiven = NOT_GIVEN,
406+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
396407
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
397408
temperature: Optional[float] | NotGiven = NOT_GIVEN,
398409
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -450,6 +461,10 @@ def create(
450461
model associated with the assistant. If not, the model associated with the
451462
assistant will be used.
452463
464+
parallel_tool_calls: Whether to enable
465+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
466+
during tool use.
467+
453468
response_format: Specifies the format that the model must output. Compatible with
454469
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
455470
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -539,6 +554,7 @@ def create(
539554
None,
540555
]
541556
| NotGiven = NOT_GIVEN,
557+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
542558
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
543559
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
544560
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -568,6 +584,7 @@ def create(
568584
"max_prompt_tokens": max_prompt_tokens,
569585
"metadata": metadata,
570586
"model": model,
587+
"parallel_tool_calls": parallel_tool_calls,
571588
"response_format": response_format,
572589
"stream": stream,
573590
"temperature": temperature,
@@ -975,6 +992,7 @@ async def create(
975992
None,
976993
]
977994
| NotGiven = NOT_GIVEN,
995+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
978996
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
979997
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
980998
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1029,6 +1047,10 @@ async def create(
10291047
model associated with the assistant. If not, the model associated with the
10301048
assistant will be used.
10311049
1050+
parallel_tool_calls: Whether to enable
1051+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1052+
during tool use.
1053+
10321054
response_format: Specifies the format that the model must output. Compatible with
10331055
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
10341056
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1123,6 +1145,7 @@ async def create(
11231145
None,
11241146
]
11251147
| NotGiven = NOT_GIVEN,
1148+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
11261149
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
11271150
temperature: Optional[float] | NotGiven = NOT_GIVEN,
11281151
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1180,6 +1203,10 @@ async def create(
11801203
model associated with the assistant. If not, the model associated with the
11811204
assistant will be used.
11821205
1206+
parallel_tool_calls: Whether to enable
1207+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1208+
during tool use.
1209+
11831210
response_format: Specifies the format that the model must output. Compatible with
11841211
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
11851212
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1270,6 +1297,7 @@ async def create(
12701297
None,
12711298
]
12721299
| NotGiven = NOT_GIVEN,
1300+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
12731301
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
12741302
temperature: Optional[float] | NotGiven = NOT_GIVEN,
12751303
tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
@@ -1327,6 +1355,10 @@ async def create(
13271355
model associated with the assistant. If not, the model associated with the
13281356
assistant will be used.
13291357
1358+
parallel_tool_calls: Whether to enable
1359+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1360+
during tool use.
1361+
13301362
response_format: Specifies the format that the model must output. Compatible with
13311363
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
13321364
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1416,6 +1448,7 @@ async def create(
14161448
None,
14171449
]
14181450
| NotGiven = NOT_GIVEN,
1451+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
14191452
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
14201453
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
14211454
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1445,6 +1478,7 @@ async def create(
14451478
"max_prompt_tokens": max_prompt_tokens,
14461479
"metadata": metadata,
14471480
"model": model,
1481+
"parallel_tool_calls": parallel_tool_calls,
14481482
"response_format": response_format,
14491483
"stream": stream,
14501484
"temperature": temperature,

src/openai/resources/beta/threads/threads.py

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -282,6 +282,7 @@ def create_and_run(
282282
None,
283283
]
284284
| NotGiven = NOT_GIVEN,
285+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
285286
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
286287
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
287288
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -331,6 +332,10 @@ def create_and_run(
331332
model associated with the assistant. If not, the model associated with the
332333
assistant will be used.
333334
335+
parallel_tool_calls: Whether to enable
336+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
337+
during tool use.
338+
334339
response_format: Specifies the format that the model must output. Compatible with
335340
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
336341
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -429,6 +434,7 @@ def create_and_run(
429434
None,
430435
]
431436
| NotGiven = NOT_GIVEN,
437+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
432438
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
433439
temperature: Optional[float] | NotGiven = NOT_GIVEN,
434440
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -481,6 +487,10 @@ def create_and_run(
481487
model associated with the assistant. If not, the model associated with the
482488
assistant will be used.
483489
490+
parallel_tool_calls: Whether to enable
491+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
492+
during tool use.
493+
484494
response_format: Specifies the format that the model must output. Compatible with
485495
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
486496
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -575,6 +585,7 @@ def create_and_run(
575585
None,
576586
]
577587
| NotGiven = NOT_GIVEN,
588+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
578589
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
579590
temperature: Optional[float] | NotGiven = NOT_GIVEN,
580591
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -627,6 +638,10 @@ def create_and_run(
627638
model associated with the assistant. If not, the model associated with the
628639
assistant will be used.
629640
641+
parallel_tool_calls: Whether to enable
642+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
643+
during tool use.
644+
630645
response_format: Specifies the format that the model must output. Compatible with
631646
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
632647
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -720,6 +735,7 @@ def create_and_run(
720735
None,
721736
]
722737
| NotGiven = NOT_GIVEN,
738+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
723739
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
724740
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
725741
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -747,6 +763,7 @@ def create_and_run(
747763
"max_prompt_tokens": max_prompt_tokens,
748764
"metadata": metadata,
749765
"model": model,
766+
"parallel_tool_calls": parallel_tool_calls,
750767
"response_format": response_format,
751768
"stream": stream,
752769
"temperature": temperature,
@@ -997,6 +1014,7 @@ async def create_and_run(
9971014
None,
9981015
]
9991016
| NotGiven = NOT_GIVEN,
1017+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
10001018
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
10011019
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
10021020
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1046,6 +1064,10 @@ async def create_and_run(
10461064
model associated with the assistant. If not, the model associated with the
10471065
assistant will be used.
10481066
1067+
parallel_tool_calls: Whether to enable
1068+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1069+
during tool use.
1070+
10491071
response_format: Specifies the format that the model must output. Compatible with
10501072
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
10511073
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1144,6 +1166,7 @@ async def create_and_run(
11441166
None,
11451167
]
11461168
| NotGiven = NOT_GIVEN,
1169+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
11471170
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
11481171
temperature: Optional[float] | NotGiven = NOT_GIVEN,
11491172
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1196,6 +1219,10 @@ async def create_and_run(
11961219
model associated with the assistant. If not, the model associated with the
11971220
assistant will be used.
11981221
1222+
parallel_tool_calls: Whether to enable
1223+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1224+
during tool use.
1225+
11991226
response_format: Specifies the format that the model must output. Compatible with
12001227
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
12011228
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1290,6 +1317,7 @@ async def create_and_run(
12901317
None,
12911318
]
12921319
| NotGiven = NOT_GIVEN,
1320+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
12931321
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
12941322
temperature: Optional[float] | NotGiven = NOT_GIVEN,
12951323
thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1342,6 +1370,10 @@ async def create_and_run(
13421370
model associated with the assistant. If not, the model associated with the
13431371
assistant will be used.
13441372
1373+
parallel_tool_calls: Whether to enable
1374+
[parallel function calling](https://platform.openai.com/docs/guides/function-calling)
1375+
during tool use.
1376+
13451377
response_format: Specifies the format that the model must output. Compatible with
13461378
[GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
13471379
[GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1435,6 +1467,7 @@ async def create_and_run(
14351467
None,
14361468
]
14371469
| NotGiven = NOT_GIVEN,
1470+
parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
14381471
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
14391472
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
14401473
temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1462,6 +1495,7 @@ async def create_and_run(
14621495
"max_prompt_tokens": max_prompt_tokens,
14631496
"metadata": metadata,
14641497
"model": model,
1498+
"parallel_tool_calls": parallel_tool_calls,
14651499
"response_format": response_format,
14661500
"stream": stream,
14671501
"temperature": temperature,

0 commit comments

Comments (0)