Skip to content

release: 1.2.3 #765

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 7 commits into from
Nov 10, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "1.2.2"
".": "1.2.3"
}
17 changes: 17 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,22 @@
# Changelog

## 1.2.3 (2023-11-10)

Full Changelog: [v1.2.2...v1.2.3](https://github.com/openai/openai-python/compare/v1.2.2...v1.2.3)

### Bug Fixes

* **cli/audio:** file format detection failing for whisper ([#733](https://github.com/openai/openai-python/issues/733)) ([01079d6](https://github.com/openai/openai-python/commit/01079d6dca13e0ec158dff81e0706d8a9d6c02ef))
* **client:** correctly flush the stream response body ([#771](https://github.com/openai/openai-python/issues/771)) ([0d52731](https://github.com/openai/openai-python/commit/0d5273165c96286f8456ae04b9eb0de5144e52f8))
* **client:** serialise pydantic v1 default fields correctly in params ([#776](https://github.com/openai/openai-python/issues/776)) ([d4c49ad](https://github.com/openai/openai-python/commit/d4c49ad2be9c0d926eece5fd33f6836279ea21e2))
* **models:** mark unknown fields as set in pydantic v1 ([#772](https://github.com/openai/openai-python/issues/772)) ([ae032a1](https://github.com/openai/openai-python/commit/ae032a1ba4efa72284a572bfaf0305af50142835))
* prevent IndexError in fine-tunes CLI ([#768](https://github.com/openai/openai-python/issues/768)) ([42f1633](https://github.com/openai/openai-python/commit/42f16332cf0f96f243f9797d6406283865254355))


### Documentation

* reword package description ([#764](https://github.com/openai/openai-python/issues/764)) ([9ff10df](https://github.com/openai/openai-python/commit/9ff10df30ca2d44978eb5f982ccf039c9f1bf1bf))

## 1.2.2 (2023-11-09)

Full Changelog: [v1.2.1...v1.2.2](https://github.com/openai/openai-python/compare/v1.2.1...v1.2.2)
Expand Down
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[project]
name = "openai"
version = "1.2.2"
description = "Client library for the openai API"
version = "1.2.3"
description = "The official Python library for the openai API"
readme = "README.md"
license = "Apache-2.0"
authors = [
Expand Down
1 change: 1 addition & 0 deletions src/openai/_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,7 @@ def construct(
if PYDANTIC_V2:
_extra[key] = value
else:
_fields_set.add(key)
fields_values[key] = value

object.__setattr__(m, "__dict__", fields_values)
Expand Down
14 changes: 12 additions & 2 deletions src/openai/_streaming.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,9 @@ def __stream__(self) -> Iterator[ResponseT]:
cast_to = self._cast_to
response = self.response
process_data = self._client._process_response_data
iterator = self._iter_events()

for sse in self._iter_events():
for sse in iterator:
if sse.data.startswith("[DONE]"):
break

Expand All @@ -63,6 +64,10 @@ def __stream__(self) -> Iterator[ResponseT]:

yield process_data(data=data, cast_to=cast_to, response=response)

# Ensure the entire stream is consumed
for sse in iterator:
...


class AsyncStream(Generic[ResponseT]):
"""Provides the core interface to iterate over an asynchronous stream response."""
Expand Down Expand Up @@ -97,8 +102,9 @@ async def __stream__(self) -> AsyncIterator[ResponseT]:
cast_to = self._cast_to
response = self.response
process_data = self._client._process_response_data
iterator = self._iter_events()

async for sse in self._iter_events():
async for sse in iterator:
if sse.data.startswith("[DONE]"):
break

Expand All @@ -113,6 +119,10 @@ async def __stream__(self) -> AsyncIterator[ResponseT]:

yield process_data(data=data, cast_to=cast_to, response=response)

# Ensure the entire stream is consumed
async for sse in iterator:
...


class ServerSentEvent:
def __init__(
Expand Down
2 changes: 1 addition & 1 deletion src/openai/_utils/_transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ def _transform_recursive(
return data

if isinstance(data, pydantic.BaseModel):
return model_dump(data, exclude_unset=True, exclude_defaults=True)
return model_dump(data, exclude_unset=True)

return _transform_value(data, annotation)

Expand Down
2 changes: 1 addition & 1 deletion src/openai/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless.

__title__ = "openai"
__version__ = "1.2.2" # x-release-please-version
__version__ = "1.2.3" # x-release-please-version
4 changes: 2 additions & 2 deletions src/openai/cli/_api/audio.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ def transcribe(args: CLITranscribeArgs) -> None:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

model = get_client().audio.transcriptions.create(
file=buffer_reader,
file=(args.file, buffer_reader),
model=args.model,
language=args.language or NOT_GIVEN,
temperature=args.temperature or NOT_GIVEN,
Expand All @@ -83,7 +83,7 @@ def translate(args: CLITranslationArgs) -> None:
buffer_reader = BufferReader(file_reader.read(), desc="Upload progress")

model = get_client().audio.translations.create(
file=buffer_reader,
file=(args.file, buffer_reader),
model=args.model,
temperature=args.temperature or NOT_GIVEN,
prompt=args.prompt or NOT_GIVEN,
Expand Down
2 changes: 1 addition & 1 deletion src/openai/lib/_validators.py
Original file line number Diff line number Diff line change
Expand Up @@ -407,7 +407,7 @@ def completions_space_start_validator(df: pd.DataFrame) -> Remediation:
"""

def add_space_start(x: Any) -> Any:
    """Prepend a space to every completion that does not already start with one.

    The lambda uses ``str.startswith`` rather than indexing ``s[0]`` so that
    empty completion strings are handled (indexing would raise ``IndexError``),
    and binds its parameter as ``s`` to avoid shadowing the outer ``x``.
    """
    x["completion"] = x["completion"].apply(lambda s: ("" if s.startswith(" ") else " ") + s)
    return x

optional_msg = None
Expand Down
6 changes: 6 additions & 0 deletions tests/api_resources/audio/test_speech.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ class TestSpeech:
loose_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])

@pytest.mark.skip(reason="Mocked tests are currently broken")
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
Expand All @@ -33,6 +34,7 @@ def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
assert isinstance(speech, BinaryResponseContent)
assert speech.json() == {"foo": "bar"}

@pytest.mark.skip(reason="Mocked tests are currently broken")
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None:
Expand All @@ -48,6 +50,7 @@ def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRou
assert isinstance(speech, BinaryResponseContent)
assert speech.json() == {"foo": "bar"}

@pytest.mark.skip(reason="Mocked tests are currently broken")
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None:
Expand All @@ -68,6 +71,7 @@ class TestAsyncSpeech:
loose_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False)
parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])

@pytest.mark.skip(reason="Mocked tests are currently broken")
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_method_create(self, client: AsyncOpenAI, respx_mock: MockRouter) -> None:
Expand All @@ -80,6 +84,7 @@ async def test_method_create(self, client: AsyncOpenAI, respx_mock: MockRouter)
assert isinstance(speech, BinaryResponseContent)
assert speech.json() == {"foo": "bar"}

@pytest.mark.skip(reason="Mocked tests are currently broken")
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_method_create_with_all_params(self, client: AsyncOpenAI, respx_mock: MockRouter) -> None:
Expand All @@ -95,6 +100,7 @@ async def test_method_create_with_all_params(self, client: AsyncOpenAI, respx_mo
assert isinstance(speech, BinaryResponseContent)
assert speech.json() == {"foo": "bar"}

@pytest.mark.skip(reason="Mocked tests are currently broken")
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_raw_response_create(self, client: AsyncOpenAI, respx_mock: MockRouter) -> None:
Expand Down
4 changes: 4 additions & 0 deletions tests/api_resources/test_files.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@ def test_raw_response_delete(self, client: OpenAI) -> None:
file = response.parse()
assert_matches_type(FileDeleted, file, path=["response"])

@pytest.mark.skip(reason="mocked response isn't working yet")
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_method_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
Expand All @@ -105,6 +106,7 @@ def test_method_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
assert isinstance(file, BinaryResponseContent)
assert file.json() == {"foo": "bar"}

@pytest.mark.skip(reason="mocked response isn't working yet")
@parametrize
@pytest.mark.respx(base_url=base_url)
def test_raw_response_content(self, client: OpenAI, respx_mock: MockRouter) -> None:
Expand Down Expand Up @@ -210,6 +212,7 @@ async def test_raw_response_delete(self, client: AsyncOpenAI) -> None:
file = response.parse()
assert_matches_type(FileDeleted, file, path=["response"])

@pytest.mark.skip(reason="mocked response isn't working yet")
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_method_content(self, client: AsyncOpenAI, respx_mock: MockRouter) -> None:
Expand All @@ -220,6 +223,7 @@ async def test_method_content(self, client: AsyncOpenAI, respx_mock: MockRouter)
assert isinstance(file, BinaryResponseContent)
assert file.json() == {"foo": "bar"}

@pytest.mark.skip(reason="mocked response isn't working yet")
@parametrize
@pytest.mark.respx(base_url=base_url)
async def test_raw_response_content(self, client: AsyncOpenAI, respx_mock: MockRouter) -> None:
Expand Down
12 changes: 6 additions & 6 deletions tests/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,12 +41,12 @@ class TestOpenAI:

@pytest.mark.respx(base_url=base_url)
def test_raw_response(self, respx_mock: MockRouter) -> None:
    """A mocked JSON body round-trips through a raw ``httpx.Response``.

    ``json=`` is given the decoded dict (not a pre-encoded JSON string) so
    that ``response.json()`` yields the dict itself.
    """
    respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))

    response = self.client.post("/foo", cast_to=httpx.Response)
    assert response.status_code == 200
    assert isinstance(response, httpx.Response)
    assert response.json() == {"foo": "bar"}

@pytest.mark.respx(base_url=base_url)
def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
Expand All @@ -57,7 +57,7 @@ def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
response = self.client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == '{"foo": "bar"}'
assert response.json() == {"foo": "bar"}

def test_copy(self) -> None:
copied = self.client.copy()
Expand Down Expand Up @@ -571,12 +571,12 @@ class TestAsyncOpenAI:
@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
async def test_raw_response(self, respx_mock: MockRouter) -> None:
    """Async variant: a mocked JSON body round-trips through a raw ``httpx.Response``.

    ``json=`` is given the decoded dict (not a pre-encoded JSON string) so
    that ``response.json()`` yields the dict itself.
    """
    respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))

    response = await self.client.post("/foo", cast_to=httpx.Response)
    assert response.status_code == 200
    assert isinstance(response, httpx.Response)
    assert response.json() == {"foo": "bar"}

@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
Expand All @@ -588,7 +588,7 @@ async def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
response = await self.client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == '{"foo": "bar"}'
assert response.json() == {"foo": "bar"}

def test_copy(self) -> None:
copied = self.client.copy()
Expand Down
7 changes: 5 additions & 2 deletions tests/test_module_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,10 @@ def test_azure_api_key_env_without_api_version() -> None:
openai.api_type = None
_os.environ["AZURE_OPENAI_API_KEY"] = "example API key"

with pytest.raises(ValueError, match=r"Expected `api_version` to be given for the Azure client"):
with pytest.raises(
ValueError,
match=r"Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable",
):
openai.completions._client


Expand All @@ -137,7 +140,7 @@ def test_azure_api_key_and_version_env() -> None:

with pytest.raises(
ValueError,
match=r"Must provide one of the `base_url` or `azure_endpoint` arguments, or the `OPENAI_BASE_URL`",
match=r"Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable",
):
openai.completions._client

Expand Down
37 changes: 35 additions & 2 deletions tests/test_transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import pytest

from openai._utils import PropertyInfo, transform, parse_datetime
from openai._compat import PYDANTIC_V2
from openai._models import BaseModel


Expand Down Expand Up @@ -210,14 +211,20 @@ def test_pydantic_unknown_field() -> None:

def test_pydantic_mismatched_types() -> None:
    """A field whose value does not match its annotation is passed through as-is.

    Pydantic v2 emits a ``UserWarning`` when serialising such a model, so the
    warning is only asserted when ``PYDANTIC_V2`` is true; v1 serialises
    silently and the call is made without the warning context.
    """
    model = MyModel.construct(foo=True)
    if PYDANTIC_V2:
        with pytest.warns(UserWarning):
            params = transform(model, Any)
    else:
        params = transform(model, Any)
    assert params == {"foo": True}


def test_pydantic_mismatched_object_type() -> None:
    """A nested model in a mismatched field is serialised to its dict form.

    As with ``test_pydantic_mismatched_types``, pydantic v2 warns on the type
    mismatch while v1 does not, so ``pytest.warns`` is applied only under v2.
    """
    model = MyModel.construct(foo=MyModel.construct(hello="world"))
    if PYDANTIC_V2:
        with pytest.warns(UserWarning):
            params = transform(model, Any)
    else:
        params = transform(model, Any)
    assert params == {"foo": {"hello": "world"}}

Expand All @@ -230,3 +237,29 @@ def test_pydantic_nested_objects() -> None:
model = ModelNestedObjects.construct(nested={"foo": "stainless"})
assert isinstance(model.nested, MyModel)
assert transform(model, Any) == {"nested": {"foo": "stainless"}}


class ModelWithDefaultField(BaseModel):
    # Required field: no default value.
    foo: str
    # Optional field whose default is None.
    with_none_default: Union[str, None] = None
    # Field with a non-None string default.
    with_str_default: str = "foo"


def test_pydantic_default_field() -> None:
    """Default-valued fields are serialised only when they were explicitly set."""
    # No fields set at all: defaults apply and are omitted from the output.
    untouched = ModelWithDefaultField.construct()
    assert untouched.with_none_default is None
    assert untouched.with_str_default == "foo"
    assert transform(untouched, Any) == {}

    # Fields explicitly set to their default values: still included.
    explicit_defaults = ModelWithDefaultField.construct(with_none_default=None, with_str_default="foo")
    assert explicit_defaults.with_none_default is None
    assert explicit_defaults.with_str_default == "foo"
    assert transform(explicit_defaults, Any) == {"with_none_default": None, "with_str_default": "foo"}

    # Fields explicitly set to non-default values: included with those values.
    overridden = ModelWithDefaultField.construct(with_none_default="bar", with_str_default="baz")
    assert overridden.with_none_default == "bar"
    assert overridden.with_str_default == "baz"
    assert transform(overridden, Any) == {"with_none_default": "bar", "with_str_default": "baz"}