Skip to content

Commit 2e259ae

Browse files
authored
fix(ai): add message truncation to anthropic (#4953)
1 parent 23ec398 commit 2e259ae

File tree

2 files changed

+57
-5
lines changed

2 files changed

+57
-5
lines changed

sentry_sdk/integrations/anthropic.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
from sentry_sdk.ai.utils import (
77
set_data_normalized,
88
normalize_message_roles,
9+
truncate_and_annotate_messages,
910
get_start_span_function,
1011
)
1112
from sentry_sdk.consts import OP, SPANDATA, SPANSTATUS
@@ -145,12 +146,14 @@ def _set_input_data(span, kwargs, integration):
145146
normalized_messages.append(message)
146147

147148
role_normalized_messages = normalize_message_roles(normalized_messages)
148-
set_data_normalized(
149-
span,
150-
SPANDATA.GEN_AI_REQUEST_MESSAGES,
151-
role_normalized_messages,
152-
unpack=False,
149+
scope = sentry_sdk.get_current_scope()
150+
messages_data = truncate_and_annotate_messages(
151+
role_normalized_messages, span, scope
153152
)
153+
if messages_data is not None:
154+
set_data_normalized(
155+
span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages_data, unpack=False
156+
)
154157

155158
set_data_normalized(
156159
span, SPANDATA.GEN_AI_RESPONSE_STREAMING, kwargs.get("stream", False)

tests/integrations/anthropic/test_anthropic.py

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -945,3 +945,52 @@ def mock_messages_create(*args, **kwargs):
945945
# Verify no "ai" roles remain
946946
roles = [msg["role"] for msg in stored_messages]
947947
assert "ai" not in roles
948+
949+
950+
def test_anthropic_message_truncation(sentry_init, capture_events):
951+
"""Test that large messages are truncated properly in Anthropic integration."""
952+
sentry_init(
953+
integrations=[AnthropicIntegration(include_prompts=True)],
954+
traces_sample_rate=1.0,
955+
send_default_pii=True,
956+
)
957+
events = capture_events()
958+
959+
client = Anthropic(api_key="z")
960+
client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE)
961+
962+
large_content = (
963+
"This is a very long message that will exceed our size limits. " * 1000
964+
)
965+
messages = [
966+
{"role": "user", "content": "small message 1"},
967+
{"role": "assistant", "content": large_content},
968+
{"role": "user", "content": large_content},
969+
{"role": "assistant", "content": "small message 4"},
970+
{"role": "user", "content": "small message 5"},
971+
]
972+
973+
with start_transaction():
974+
client.messages.create(max_tokens=1024, messages=messages, model="model")
975+
976+
assert len(events) > 0
977+
tx = events[0]
978+
assert tx["type"] == "transaction"
979+
980+
chat_spans = [
981+
span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_CHAT
982+
]
983+
assert len(chat_spans) > 0
984+
985+
chat_span = chat_spans[0]
986+
assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["data"]
987+
988+
messages_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
989+
assert isinstance(messages_data, str)
990+
991+
parsed_messages = json.loads(messages_data)
992+
assert isinstance(parsed_messages, list)
993+
assert len(parsed_messages) == 2
994+
assert "small message 4" in str(parsed_messages[0])
995+
assert "small message 5" in str(parsed_messages[1])
996+
assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5

0 commit comments

Comments
 (0)