Skip to content

feat(api): updates #1461

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jun 3, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .stats.yml
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
configured_endpoints: 64
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml
18 changes: 12 additions & 6 deletions src/openai/resources/batches.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ def create(
for how to upload a file.

Your input file must be formatted as a
[JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
[JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 100 MB in size.

Expand Down Expand Up @@ -195,8 +195,11 @@ def cancel(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Batch:
"""
Cancels an in-progress batch.
"""Cancels an in-progress batch.

The batch will be in status `cancelling` for up to
10 minutes, before changing to `cancelled`, where it will have partial results
(if any) available in the output file.

Args:
extra_headers: Send extra headers
Expand Down Expand Up @@ -259,7 +262,7 @@ async def create(
for how to upload a file.

Your input file must be formatted as a
[JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
[JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 100 MB in size.

Expand Down Expand Up @@ -386,8 +389,11 @@ async def cancel(
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> Batch:
"""
Cancels an in-progress batch.
"""Cancels an in-progress batch.

The batch will be in status `cancelling` for up to
10 minutes, before changing to `cancelled`, where it will have partial results
(if any) available in the output file.

Args:
extra_headers: Send extra headers
Expand Down
24 changes: 22 additions & 2 deletions src/openai/resources/beta/vector_stores/file_batches.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ def create(
vector_store_id: str,
*,
file_ids: List[str],
chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand All @@ -57,6 +58,9 @@ def create(
the vector store should use. Useful for tools like `file_search` that can access
files.

chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
strategy.

extra_headers: Send extra headers

extra_query: Add additional query parameters to the request
Expand All @@ -70,7 +74,13 @@ def create(
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/vector_stores/{vector_store_id}/file_batches",
body=maybe_transform({"file_ids": file_ids}, file_batch_create_params.FileBatchCreateParams),
body=maybe_transform(
{
"file_ids": file_ids,
"chunking_strategy": chunking_strategy,
},
file_batch_create_params.FileBatchCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
Expand Down Expand Up @@ -242,6 +252,7 @@ async def create(
vector_store_id: str,
*,
file_ids: List[str],
chunking_strategy: file_batch_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand All @@ -257,6 +268,9 @@ async def create(
the vector store should use. Useful for tools like `file_search` that can access
files.

chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
strategy.

extra_headers: Send extra headers

extra_query: Add additional query parameters to the request
Expand All @@ -270,7 +284,13 @@ async def create(
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/vector_stores/{vector_store_id}/file_batches",
body=await async_maybe_transform({"file_ids": file_ids}, file_batch_create_params.FileBatchCreateParams),
body=await async_maybe_transform(
{
"file_ids": file_ids,
"chunking_strategy": chunking_strategy,
},
file_batch_create_params.FileBatchCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
Expand Down
24 changes: 22 additions & 2 deletions src/openai/resources/beta/vector_stores/files.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ def create(
vector_store_id: str,
*,
file_id: str,
chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand All @@ -58,6 +59,9 @@ def create(
vector store should use. Useful for tools like `file_search` that can access
files.

chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
strategy.

extra_headers: Send extra headers

extra_query: Add additional query parameters to the request
Expand All @@ -71,7 +75,13 @@ def create(
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/vector_stores/{vector_store_id}/files",
body=maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams),
body=maybe_transform(
{
"file_id": file_id,
"chunking_strategy": chunking_strategy,
},
file_create_params.FileCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
Expand Down Expand Up @@ -242,6 +252,7 @@ async def create(
vector_store_id: str,
*,
file_id: str,
chunking_strategy: file_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand All @@ -259,6 +270,9 @@ async def create(
vector store should use. Useful for tools like `file_search` that can access
files.

chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
strategy.

extra_headers: Send extra headers

extra_query: Add additional query parameters to the request
Expand All @@ -272,7 +286,13 @@ async def create(
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return await self._post(
f"/vector_stores/{vector_store_id}/files",
body=await async_maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams),
body=await async_maybe_transform(
{
"file_id": file_id,
"chunking_strategy": chunking_strategy,
},
file_create_params.FileCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
Expand Down
10 changes: 10 additions & 0 deletions src/openai/resources/beta/vector_stores/vector_stores.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@ def with_streaming_response(self) -> VectorStoresWithStreamingResponse:
def create(
self,
*,
chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
file_ids: List[str] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
Expand All @@ -79,6 +80,9 @@ def create(
Create a vector store.

Args:
chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
strategy. Only applicable if `file_ids` is non-empty.

expires_after: The expiration policy for a vector store.

file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
Expand All @@ -105,6 +109,7 @@ def create(
"/vector_stores",
body=maybe_transform(
{
"chunking_strategy": chunking_strategy,
"expires_after": expires_after,
"file_ids": file_ids,
"metadata": metadata,
Expand Down Expand Up @@ -326,6 +331,7 @@ def with_streaming_response(self) -> AsyncVectorStoresWithStreamingResponse:
async def create(
self,
*,
chunking_strategy: vector_store_create_params.ChunkingStrategy | NotGiven = NOT_GIVEN,
expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
file_ids: List[str] | NotGiven = NOT_GIVEN,
metadata: Optional[object] | NotGiven = NOT_GIVEN,
Expand All @@ -341,6 +347,9 @@ async def create(
Create a vector store.

Args:
chunking_strategy: The chunking strategy used to chunk the file(s). If not set, will use the `auto`
strategy. Only applicable if `file_ids` is non-empty.

expires_after: The expiration policy for a vector store.

file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
Expand All @@ -367,6 +376,7 @@ async def create(
"/vector_stores",
body=await async_maybe_transform(
{
"chunking_strategy": chunking_strategy,
"expires_after": expires_after,
"file_ids": file_ids,
"metadata": metadata,
Expand Down
24 changes: 18 additions & 6 deletions src/openai/resources/files.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ def create(
self,
*,
file: FileTypes,
purpose: Literal["assistants", "batch", "fine-tune"],
purpose: Literal["assistants", "batch", "fine-tune", "vision"],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand All @@ -70,9 +70,15 @@ def create(
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
details.

The Fine-tuning API only supports `.jsonl` files.
The Fine-tuning API only supports `.jsonl` files. The input also has certain
required formats for fine-tuning
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
models.

The Batch API only supports `.jsonl` files up to 100 MB in size.
The Batch API only supports `.jsonl` files up to 100 MB in size. The input also
has a specific required
[format](https://platform.openai.com/docs/api-reference/batch/request-input).

Please [contact us](https://help.openai.com/) if you need to increase these
storage limits.
Expand Down Expand Up @@ -305,7 +311,7 @@ async def create(
self,
*,
file: FileTypes,
purpose: Literal["assistants", "batch", "fine-tune"],
purpose: Literal["assistants", "batch", "fine-tune", "vision"],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
Expand All @@ -324,9 +330,15 @@ async def create(
[Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) for
details.

The Fine-tuning API only supports `.jsonl` files.
The Fine-tuning API only supports `.jsonl` files. The input also has certain
required formats for fine-tuning
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
models.

The Batch API only supports `.jsonl` files up to 100 MB in size.
The Batch API only supports `.jsonl` files up to 100 MB in size. The input also
has a specific required
[format](https://platform.openai.com/docs/api-reference/batch/request-input).

Please [contact us](https://help.openai.com/) if you need to increase these
storage limits.
Expand Down
10 changes: 10 additions & 0 deletions src/openai/resources/fine_tuning/jobs/jobs.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,11 @@ def create(
Your dataset must be formatted as a JSONL file. Additionally, you must upload
your file with the purpose `fine-tune`.

The contents of the file should differ depending on if the model uses the
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
format.

See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
for more details.

Expand Down Expand Up @@ -362,6 +367,11 @@ async def create(
Your dataset must be formatted as a JSONL file. Additionally, you must upload
your file with the purpose `fine-tune`.

The contents of the file should differ depending on if the model uses the
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
format.

See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
for more details.

Expand Down
2 changes: 1 addition & 1 deletion src/openai/types/batch_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ class BatchCreateParams(TypedDict, total=False):
for how to upload a file.

Your input file must be formatted as a
[JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
[JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
and must be uploaded with the purpose `batch`. The file can contain up to 50,000
requests, and can be up to 100 MB in size.
"""
Expand Down
42 changes: 42 additions & 0 deletions src/openai/types/beta/assistant_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,10 @@
"ToolResourcesCodeInterpreter",
"ToolResourcesFileSearch",
"ToolResourcesFileSearchVectorStore",
"ToolResourcesFileSearchVectorStoreChunkingStrategy",
"ToolResourcesFileSearchVectorStoreChunkingStrategyAuto",
"ToolResourcesFileSearchVectorStoreChunkingStrategyStatic",
"ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic",
]


Expand Down Expand Up @@ -134,7 +138,45 @@ class ToolResourcesCodeInterpreter(TypedDict, total=False):
"""


class ToolResourcesFileSearchVectorStoreChunkingStrategyAuto(TypedDict, total=False):
    """The `auto` chunking strategy variant.

    Carries only its discriminator; chunking parameters are chosen server-side.
    """

    type: Required[Literal["auto"]]
    """Always `auto`."""


class ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic(TypedDict, total=False):
    """Parameters for the `static` chunking strategy (see the `static` field of
    the enclosing strategy object)."""

    chunk_overlap_tokens: Required[int]
    """The number of tokens that overlap between chunks. The default value is `400`.

    Note that the overlap must not exceed half of `max_chunk_size_tokens`.
    """

    max_chunk_size_tokens: Required[int]
    """The maximum number of tokens in each chunk.

    The default value is `800`. The minimum value is `100` and the maximum value is
    `4096`.
    """


class ToolResourcesFileSearchVectorStoreChunkingStrategyStatic(TypedDict, total=False):
    """The `static` chunking strategy variant: a discriminator plus the
    user-supplied chunk-size/overlap parameters."""

    static: Required[ToolResourcesFileSearchVectorStoreChunkingStrategyStaticStatic]

    type: Required[Literal["static"]]
    """Always `static`."""


# Union of the supported chunking strategies, discriminated by the required
# `type` literal ("auto" vs "static") on each variant.
ToolResourcesFileSearchVectorStoreChunkingStrategy = Union[
    ToolResourcesFileSearchVectorStoreChunkingStrategyAuto, ToolResourcesFileSearchVectorStoreChunkingStrategyStatic
]


class ToolResourcesFileSearchVectorStore(TypedDict, total=False):
chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
"""The chunking strategy used to chunk the file(s).

If not set, will use the `auto` strategy.
"""

file_ids: List[str]
"""
A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
Expand Down
Loading