|
| 1 | +import json |
1 | 2 | import pytest |
2 | 3 | from unittest import mock |
3 | 4 | from datetime import datetime |
@@ -546,3 +547,61 @@ def dict(self): |
546 | 547 |
|
547 | 548 | # Should have extracted the response message |
548 | 549 | assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["data"] |
| 550 | + |
| 551 | + |
def test_litellm_message_truncation(sentry_init, capture_events):
    """Verify the LiteLLM integration truncates oversized request messages.

    Sends a five-message conversation in which two entries are far larger
    than the size limit, then checks that only the trailing small messages
    survive in the serialized span data while the original message count (5)
    is preserved in the ``_meta`` truncation annotation.
    """
    sentry_init(
        integrations=[LiteLLMIntegration(include_prompts=True)],
        traces_sample_rate=1.0,
        send_default_pii=True,
    )
    events = capture_events()

    oversized_text = (
        "This is a very long message that will exceed our size limits. " * 1000
    )
    conversation = [
        {"role": "user", "content": "small message 1"},
        {"role": "assistant", "content": oversized_text},
        {"role": "user", "content": oversized_text},
        {"role": "assistant", "content": "small message 4"},
        {"role": "user", "content": "small message 5"},
    ]
    fake_response = MockCompletionResponse()

    with start_transaction(name="litellm test"):
        # Drive the LiteLLM callbacks directly instead of calling the API.
        request_kwargs = {
            "model": "gpt-3.5-turbo",
            "messages": conversation,
        }
        _input_callback(request_kwargs)
        _success_callback(
            request_kwargs,
            fake_response,
            datetime.now(),
            datetime.now(),
        )

    assert events
    transaction = events[0]
    assert transaction["type"] == "transaction"

    gen_ai_spans = [
        s for s in transaction.get("spans", []) if s.get("op") == OP.GEN_AI_CHAT
    ]
    assert gen_ai_spans

    span = gen_ai_spans[0]
    assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"]

    serialized = span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
    assert isinstance(serialized, str)

    surviving = json.loads(serialized)
    assert isinstance(surviving, list)
    # Only the two trailing small messages fit under the size limit.
    assert len(surviving) == 2
    assert "small message 4" in str(surviving[0])
    assert "small message 5" in str(surviving[1])
    # _meta records the original (pre-truncation) message count.
    assert (
        transaction["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"]
        == 5
    )
0 commit comments