Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import logging
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Any, Generic, SupportsFloat, TypeVar, overload
Expand All @@ -9,6 +10,8 @@

from key_value.aio.protocols.key_value import AsyncKeyValue

logger = logging.getLogger(__name__)

T = TypeVar("T")


Expand Down Expand Up @@ -57,6 +60,17 @@ def _validate_model(self, value: dict[str, Any]) -> T | None:
if self._raise_on_validation_error:
msg = f"Invalid {self._get_model_type_name()} payload: missing 'items' wrapper"
raise DeserializationError(msg)

# Log the missing 'items' wrapper when not raising
logger.error(
"Missing 'items' wrapper for list %s",
self._get_model_type_name(),
extra={
"model_type": self._get_model_type_name(),
"error": "missing 'items' wrapper",
},
exc_info=False,
)
return None
return self._type_adapter.validate_python(value["items"])

Expand All @@ -66,6 +80,19 @@ def _validate_model(self, value: dict[str, Any]) -> T | None:
details = e.errors(include_input=False)
msg = f"Invalid {self._get_model_type_name()}: {details}"
raise DeserializationError(msg) from e

# Log the validation error when not raising
error_details = e.errors(include_input=False)
logger.error(
"Validation failed for %s",
self._get_model_type_name(),
extra={
"model_type": self._get_model_type_name(),
"error_count": len(error_details),
"errors": error_details,
},
exc_info=True,
)
return None

def _serialize_model(self, value: T) -> dict[str, Any]:
Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import logging
from collections.abc import Sequence
from datetime import datetime
from typing import Any, overload
Expand Down Expand Up @@ -39,6 +40,8 @@
raise ImportError(msg) from e


logger = logging.getLogger(__name__)

DEFAULT_INDEX_PREFIX = "kv_store"

DEFAULT_MAPPING = {
Expand Down Expand Up @@ -289,7 +292,19 @@ async def _get_managed_entries(self, *, collection: str, keys: Sequence[str]) ->
entries_by_id[doc_id] = None
continue

entries_by_id[doc_id] = source_to_managed_entry(source=source)
try:
entries_by_id[doc_id] = source_to_managed_entry(source=source)
except DeserializationError as e:
logger.error(
"Failed to deserialize Elasticsearch document in batch operation",
extra={
"collection": collection,
"document_id": doc_id,
"error": str(e),
},
exc_info=True,
)
entries_by_id[doc_id] = None

# Return entries in the same order as input keys
return [entries_by_id.get(document_id) for document_id in document_ids]
Expand Down
72 changes: 72 additions & 0 deletions key-value/key-value-aio/tests/adapters/test_pydantic.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from datetime import datetime, timezone
from logging import LogRecord

import pytest
from inline_snapshot import snapshot
Expand Down Expand Up @@ -47,6 +48,27 @@ class Order(BaseModel):
TEST_KEY_2: str = "test_key_2"


def model_type_from_log_record(record: LogRecord) -> str:
    """Return the ``model_type`` value attached to *record* via logging ``extra``.

    Raises:
        ValueError: if the record carries no ``model_type`` attribute.
    """
    try:
        return record.model_type  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType, reportAttributeAccessIssue]
    except AttributeError:
        msg = "Log record does not have a model_type attribute"
        raise ValueError(msg) from None


def error_from_log_record(record: LogRecord) -> str:
    """Return the ``error`` value attached to *record* via logging ``extra``.

    Raises:
        ValueError: if the record carries no ``error`` attribute.
    """
    try:
        return record.error  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType, reportAttributeAccessIssue]
    except AttributeError:
        msg = "Log record does not have an error attribute"
        raise ValueError(msg) from None


def errors_from_log_record(record: LogRecord) -> list[object]:
    """Return the ``errors`` value attached to *record* via logging ``extra``.

    The adapter logs ``e.errors(include_input=False)``, which is a list of
    pydantic error *dicts*, not strings — hence ``list[object]`` rather than
    the previously inaccurate ``list[str]``; callers already apply ``str()``
    to individual items before inspecting them.

    Raises:
        ValueError: if the record carries no ``errors`` attribute.
    """
    if not hasattr(record, "errors"):
        msg = "Log record does not have an errors attribute"
        raise ValueError(msg)
    return record.errors  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType, reportAttributeAccessIssue]


class TestPydanticAdapter:
@pytest.fixture
async def store(self) -> MemoryStore:
Expand Down Expand Up @@ -145,3 +167,53 @@ async def test_complex_adapter_with_list(self, product_list_adapter: PydanticAda

assert await product_list_adapter.delete(collection=TEST_COLLECTION, key=TEST_KEY)
assert await product_list_adapter.get(collection=TEST_COLLECTION, key=TEST_KEY) is None

async def test_validation_error_logging(
    self, user_adapter: PydanticAdapter[User], updated_user_adapter: PydanticAdapter[UpdatedUser], caplog: pytest.LogCaptureFixture
):
    """Validation failures are logged at ERROR level when raise_on_validation_error=False.

    Stores a User, then reads it back through an UpdatedUser adapter whose
    model requires an extra field (is_admin); the adapter should swallow the
    ValidationError, return None, and emit exactly one structured ERROR record.
    """
    # Store a User, then try to retrieve as UpdatedUser (missing is_admin field)
    await user_adapter.put(collection=TEST_COLLECTION, key=TEST_KEY, value=SAMPLE_USER)

    # caplog.at_level accepts a level *name*, which avoids the function-scoped
    # `import logging` the original version needed just for logging.ERROR.
    with caplog.at_level("ERROR"):
        updated_user = await updated_user_adapter.get(collection=TEST_COLLECTION, key=TEST_KEY)

    # Should return None due to validation failure
    assert updated_user is None

    # Exactly one ERROR record carrying the adapter's structured context
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "ERROR"
    assert "Validation failed" in record.message
    assert model_type_from_log_record(record) == "Pydantic model"

    errors = errors_from_log_record(record)
    assert len(errors) == 1
    assert "is_admin" in str(errors[0])

Comment on lines +171 to +196
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

Comprehensive test for validation error logging.

The test correctly verifies that validation failures are logged at ERROR level with appropriate context. The test logic is sound, and assertions properly validate the logging behavior.

Consider moving the logging import (line 175) to the top of the file with other imports for consistency, though the current placement is acceptable:

 import pytest
 from inline_snapshot import snapshot
 from key_value.shared.errors import DeserializationError
 from pydantic import AnyHttpUrl, BaseModel
+import logging

 from key_value.aio.adapters.pydantic import PydanticAdapter

Then remove the inline import:

     async def test_validation_error_logging(
         self, user_adapter: PydanticAdapter[User], updated_user_adapter: PydanticAdapter[UpdatedUser], caplog: pytest.LogCaptureFixture
     ):
         """Test that validation errors are logged when raise_on_validation_error=False."""
-        import logging

         # Store a User, then try to retrieve as UpdatedUser (missing is_admin field)

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In key-value/key-value-aio/tests/adapters/test_pydantic.py around lines 171 to
196, there is an inline "import logging" at line 175; move that import to the
top of the file with the other imports and delete the inline import, ensuring no
other references are changed and tests still use caplog.at_level(logging.ERROR).

async def test_list_validation_error_logging(
    self, product_list_adapter: PydanticAdapter[list[Product]], store: MemoryStore, caplog: pytest.LogCaptureFixture
):
    """Missing 'items' wrapper is logged at ERROR level for list models.

    Writes a raw dict without the 'items' key directly to the store, then reads
    it through a list adapter; the adapter should return None and emit exactly
    one structured ERROR record describing the missing wrapper.
    """
    # Manually store invalid data (missing 'items' wrapper)
    await store.put(collection=TEST_COLLECTION, key=TEST_KEY, value={"invalid": "data"})

    # caplog.at_level accepts a level *name*, which avoids the function-scoped
    # `import logging` the original version needed just for logging.ERROR.
    with caplog.at_level("ERROR"):
        result = await product_list_adapter.get(collection=TEST_COLLECTION, key=TEST_KEY)

    # Should return None due to missing 'items' wrapper
    assert result is None

    # Exactly one ERROR record carrying the adapter's structured context
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "ERROR"
    assert "Missing 'items' wrapper" in record.message
    assert model_type_from_log_record(record) == "Pydantic model"
    error = error_from_log_record(record)
    assert "missing 'items' wrapper" in str(error)
Comment on lines +197 to +219
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

Well-designed test for list validation error logging.

The test properly validates that missing 'items' wrapper errors are logged correctly. The use of error_from_log_record (singular) is appropriate here, and the variable naming is consistent.

Same optional suggestion as the previous test: consider moving the logging import to the module level for consistency:

 import pytest
 from inline_snapshot import snapshot
 from key_value.shared.errors import DeserializationError
 from pydantic import AnyHttpUrl, BaseModel
+import logging

 from key_value.aio.adapters.pydantic import PydanticAdapter

Then remove the inline import:

     async def test_list_validation_error_logging(
         self, product_list_adapter: PydanticAdapter[list[Product]], store: MemoryStore, caplog: pytest.LogCaptureFixture
     ):
         """Test that missing 'items' wrapper is logged for list models."""
-        import logging

         # Manually store invalid data (missing 'items' wrapper)

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In key-value/key-value-aio/tests/adapters/test_pydantic.py around lines 197 to
219, the test contains an inline "import logging"; move that import to the top
of the module with other imports and remove the inline import from the test body
so the test uses the module-level logging import for consistency with other
tests.

Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# WARNING: this file is auto-generated by 'build_sync_library.py'
# from the original file 'base.py'
# DO NOT CHANGE! Change the original file instead.
import logging
from abc import ABC, abstractmethod
from collections.abc import Sequence
from typing import Any, Generic, SupportsFloat, TypeVar, overload
Expand All @@ -12,6 +13,8 @@

from key_value.sync.code_gen.protocols.key_value import KeyValue

logger = logging.getLogger(__name__)

T = TypeVar("T")


Expand Down Expand Up @@ -60,6 +63,14 @@ def _validate_model(self, value: dict[str, Any]) -> T | None:
if self._raise_on_validation_error:
msg = f"Invalid {self._get_model_type_name()} payload: missing 'items' wrapper"
raise DeserializationError(msg)

# Log the missing 'items' wrapper when not raising
logger.error(
"Missing 'items' wrapper for list %s",
self._get_model_type_name(),
extra={"model_type": self._get_model_type_name(), "error": "missing 'items' wrapper"},
exc_info=False,
)
return None
return self._type_adapter.validate_python(value["items"])

Expand All @@ -69,6 +80,15 @@ def _validate_model(self, value: dict[str, Any]) -> T | None:
details = e.errors(include_input=False)
msg = f"Invalid {self._get_model_type_name()}: {details}"
raise DeserializationError(msg) from e

# Log the validation error when not raising
error_details = e.errors(include_input=False)
logger.error(
"Validation failed for %s",
self._get_model_type_name(),
extra={"model_type": self._get_model_type_name(), "error_count": len(error_details), "errors": error_details},
exc_info=True,
)
return None

def _serialize_model(self, value: T) -> dict[str, Any]:
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# WARNING: this file is auto-generated by 'build_sync_library.py'
# from the original file 'store.py'
# DO NOT CHANGE! Change the original file instead.
import logging
from collections.abc import Sequence
from datetime import datetime
from typing import Any, overload
Expand Down Expand Up @@ -37,6 +38,8 @@
msg = "ElasticsearchStore requires py-key-value-aio[elasticsearch]"
raise ImportError(msg) from e

logger = logging.getLogger(__name__)

DEFAULT_INDEX_PREFIX = "kv_store"

# You might think the `string` field should be a text/keyword field
Expand Down Expand Up @@ -249,7 +252,15 @@ def _get_managed_entries(self, *, collection: str, keys: Sequence[str]) -> list[
entries_by_id[doc_id] = None
continue

entries_by_id[doc_id] = source_to_managed_entry(source=source)
try:
entries_by_id[doc_id] = source_to_managed_entry(source=source)
except DeserializationError as e:
logger.error(
"Failed to deserialize Elasticsearch document in batch operation",
extra={"collection": collection, "document_id": doc_id, "error": str(e)},
exc_info=True,
)
entries_by_id[doc_id] = None

# Return entries in the same order as input keys
return [entries_by_id.get(document_id) for document_id in document_ids]
Expand Down
72 changes: 72 additions & 0 deletions key-value/key-value-sync/tests/code_gen/adapters/test_pydantic.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
# from the original file 'test_pydantic.py'
# DO NOT CHANGE! Change the original file instead.
from datetime import datetime, timezone
from logging import LogRecord

import pytest
from inline_snapshot import snapshot
Expand Down Expand Up @@ -50,6 +51,27 @@ class Order(BaseModel):
TEST_KEY_2: str = "test_key_2"


def model_type_from_log_record(record: LogRecord) -> str:
    """Return the ``model_type`` value attached to *record* via logging ``extra``.

    Raises:
        ValueError: if the record carries no ``model_type`` attribute.
    """
    try:
        return record.model_type  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType, reportAttributeAccessIssue]
    except AttributeError:
        msg = "Log record does not have a model_type attribute"
        raise ValueError(msg) from None


def error_from_log_record(record: LogRecord) -> str:
    """Return the ``error`` value attached to *record* via logging ``extra``.

    Raises:
        ValueError: if the record carries no ``error`` attribute.
    """
    try:
        return record.error  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType, reportAttributeAccessIssue]
    except AttributeError:
        msg = "Log record does not have an error attribute"
        raise ValueError(msg) from None


def errors_from_log_record(record: LogRecord) -> list[object]:
    """Return the ``errors`` value attached to *record* via logging ``extra``.

    The adapter logs ``e.errors(include_input=False)``, which is a list of
    pydantic error *dicts*, not strings — hence ``list[object]`` rather than
    the previously inaccurate ``list[str]``; callers already apply ``str()``
    to individual items before inspecting them.

    Raises:
        ValueError: if the record carries no ``errors`` attribute.
    """
    if not hasattr(record, "errors"):
        msg = "Log record does not have an errors attribute"
        raise ValueError(msg)
    return record.errors  # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType, reportAttributeAccessIssue]


class TestPydanticAdapter:
@pytest.fixture
def store(self) -> MemoryStore:
Expand Down Expand Up @@ -148,3 +170,53 @@ def test_complex_adapter_with_list(self, product_list_adapter: PydanticAdapter[l

assert product_list_adapter.delete(collection=TEST_COLLECTION, key=TEST_KEY)
assert product_list_adapter.get(collection=TEST_COLLECTION, key=TEST_KEY) is None

def test_validation_error_logging(
    self, user_adapter: PydanticAdapter[User], updated_user_adapter: PydanticAdapter[UpdatedUser], caplog: pytest.LogCaptureFixture
):
    """Validation failures are logged at ERROR level when raise_on_validation_error=False.

    Stores a User, then reads it back through an UpdatedUser adapter whose
    model requires an extra field (is_admin); the adapter should swallow the
    ValidationError, return None, and emit exactly one structured ERROR record.
    """
    # Store a User, then try to retrieve as UpdatedUser (missing is_admin field)
    user_adapter.put(collection=TEST_COLLECTION, key=TEST_KEY, value=SAMPLE_USER)

    # caplog.at_level accepts a level *name*, which avoids the function-scoped
    # `import logging` the original version needed just for logging.ERROR.
    with caplog.at_level("ERROR"):
        updated_user = updated_user_adapter.get(collection=TEST_COLLECTION, key=TEST_KEY)

    # Should return None due to validation failure
    assert updated_user is None

    # Exactly one ERROR record carrying the adapter's structured context
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "ERROR"
    assert "Validation failed" in record.message
    assert model_type_from_log_record(record) == "Pydantic model"

    errors = errors_from_log_record(record)
    assert len(errors) == 1
    assert "is_admin" in str(errors[0])

def test_list_validation_error_logging(
    self, product_list_adapter: PydanticAdapter[list[Product]], store: MemoryStore, caplog: pytest.LogCaptureFixture
):
    """Missing 'items' wrapper is logged at ERROR level for list models.

    Writes a raw dict without the 'items' key directly to the store, then reads
    it through a list adapter; the adapter should return None and emit exactly
    one structured ERROR record describing the missing wrapper.
    """
    # Manually store invalid data (missing 'items' wrapper)
    store.put(collection=TEST_COLLECTION, key=TEST_KEY, value={"invalid": "data"})

    # caplog.at_level accepts a level *name*, which avoids the function-scoped
    # `import logging` the original version needed just for logging.ERROR.
    with caplog.at_level("ERROR"):
        result = product_list_adapter.get(collection=TEST_COLLECTION, key=TEST_KEY)

    # Should return None due to missing 'items' wrapper
    assert result is None

    # Exactly one ERROR record carrying the adapter's structured context
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.levelname == "ERROR"
    assert "Missing 'items' wrapper" in record.message
    assert model_type_from_log_record(record) == "Pydantic model"
    error = error_from_log_record(record)
    assert "missing 'items' wrapper" in str(error)