Skip to content

Commit 9c31244

Browse files
authored
Merge pull request #779 from openai/release-please--branches--master--changes--next--components--openai
release: 4.37.1
2 parents c2c998d + f3a5360 commit 9c31244

File tree

9 files changed

+153
-33
lines changed

9 files changed

+153
-33
lines changed

.release-please-manifest.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "4.37.0"
2+
".": "4.37.1"
33
}

CHANGELOG.md

+8
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,13 @@
11
# Changelog
22

3+
## 4.37.1 (2024-04-17)
4+
5+
Full Changelog: [v4.37.0...v4.37.1](https://github.com/openai/openai-node/compare/v4.37.0...v4.37.1)
6+
7+
### Chores
8+
9+
* **api:** docs and response_format response property ([#778](https://github.com/openai/openai-node/issues/778)) ([78f5c35](https://github.com/openai/openai-node/commit/78f5c3568d95d8e854c04049dc7d5643aa49e93f))
10+
311
## 4.37.0 (2024-04-17)
412

513
Full Changelog: [v4.36.0...v4.37.0](https://github.com/openai/openai-node/compare/v4.36.0...v4.37.0)

README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ You can import in Deno via:
1919
<!-- x-release-please-start-version -->
2020

2121
```ts
22-
import OpenAI from 'https://deno.land/x/openai@v4.37.0/mod.ts';
22+
import OpenAI from 'https://deno.land/x/openai@v4.37.1/mod.ts';
2323
```
2424

2525
<!-- x-release-please-end -->

build-deno

+1-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ This is a build produced from https://github.com/openai/openai-node – please g
1414
Usage:
1515
1616
\`\`\`ts
17-
import OpenAI from "https://deno.land/x/openai@v4.37.0/mod.ts";
17+
import OpenAI from "https://deno.land/x/openai@v4.37.1/mod.ts";
1818
1919
const client = new OpenAI();
2020
\`\`\`

package.json

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "openai",
3-
"version": "4.37.0",
3+
"version": "4.37.1",
44
"description": "The official TypeScript library for the OpenAI API",
55
"author": "OpenAI <support@openai.com>",
66
"types": "dist/index.d.ts",

src/resources/beta/assistants.ts

+36-2
Original file line numberDiff line numberDiff line change
@@ -142,13 +142,47 @@ export interface Assistant {
142142
*/
143143
tools: Array<AssistantTool>;
144144

145+
/**
146+
* Specifies the format that the model must output. Compatible with
147+
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
148+
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
149+
*
150+
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
151+
* message the model generates is valid JSON.
152+
*
153+
* **Important:** when using JSON mode, you **must** also instruct the model to
154+
* produce JSON yourself via a system or user message. Without this, the model may
155+
* generate an unending stream of whitespace until the generation reaches the token
156+
* limit, resulting in a long-running and seemingly "stuck" request. Also note that
157+
* the message content may be partially cut off if `finish_reason="length"`, which
158+
* indicates the generation exceeded `max_tokens` or the conversation exceeded the
159+
* max context length.
160+
*/
161+
response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
162+
163+
/**
164+
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
165+
* make the output more random, while lower values like 0.2 will make it more
166+
* focused and deterministic.
167+
*/
168+
temperature?: number | null;
169+
145170
/**
146171
* A set of resources that are used by the assistant's tools. The resources are
147172
* specific to the type of tool. For example, the `code_interpreter` tool requires
148173
* a list of file IDs, while the `file_search` tool requires a list of vector store
149174
* IDs.
150175
*/
151176
tool_resources?: Assistant.ToolResources | null;
177+
178+
/**
179+
* An alternative to sampling with temperature, called nucleus sampling, where the
180+
* model considers the results of the tokens with top_p probability mass. So 0.1
181+
* means only the tokens comprising the top 10% probability mass are considered.
182+
*
183+
* We generally recommend altering this or temperature but not both.
184+
*/
185+
top_p?: number | null;
152186
}
153187

154188
export namespace Assistant {
@@ -1012,7 +1046,7 @@ export interface AssistantCreateParams {
10121046
/**
10131047
* Specifies the format that the model must output. Compatible with
10141048
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1015-
* all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
1049+
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
10161050
*
10171051
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
10181052
* message the model generates is valid JSON.
@@ -1158,7 +1192,7 @@ export interface AssistantUpdateParams {
11581192
/**
11591193
* Specifies the format that the model must output. Compatible with
11601194
* [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1161-
* all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
1195+
* all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
11621196
*
11631197
* Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
11641198
* message the model generates is valid JSON.

0 commit comments

Comments
 (0)