@@ -282,6 +282,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -331,6 +332,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -429,6 +434,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -481,6 +487,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -575,6 +585,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -627,6 +638,10 @@ def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -720,6 +735,7 @@ def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -747,6 +763,7 @@ def create_and_run(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,
@@ -997,6 +1014,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1046,6 +1064,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1144,6 +1166,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1196,6 +1219,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1290,6 +1317,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
@@ -1342,6 +1370,10 @@ async def create_and_run(
               model associated with the assistant. If not, the model associated with the
               assistant will be used.
 
+          parallel_tool_calls: Whether to enable
+              [parallel function calling](https://platform.openai.com/docs/guides/function-calling)
+              during tool use.
+
           response_format: Specifies the format that the model must output. Compatible with
               [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
               [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
@@ -1435,6 +1467,7 @@ async def create_and_run(
             None,
         ]
         | NotGiven = NOT_GIVEN,
+        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
         response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1462,6 +1495,7 @@ async def create_and_run(
                     "max_prompt_tokens": max_prompt_tokens,
                     "metadata": metadata,
                     "model": model,
+                    "parallel_tool_calls": parallel_tool_calls,
                     "response_format": response_format,
                     "stream": stream,
                     "temperature": temperature,