
Commit a317f22

fede-kamel and claude authored
Fix test_tool_choice_none_after_tool_results to match actual behavior (#57)
The test was failing after rebase because it used non-existent OCI SDK classes (models.Tool) and had incorrect expectations about when tool_choice is set to 'none'.

Changes:

1. Replace OCI SDK mock objects with a Python function (following the pattern from other tests in the file)
2. Update the test to trigger the actual tool_choice=none behavior by exceeding the max_sequential_tool_calls limit (3 tool calls)
3. Fix the _prepare_request call signature (add the stop parameter)
4. Pass bound model kwargs to _prepare_request (required for tools)
5. Update the docstring to accurately describe what's being tested

The test now correctly validates that tool_choice is set to ToolChoiceNone when the max_sequential_tool_calls limit is reached, preventing infinite tool-calling loops.

Related to PR #50 (infinite loop fix) and PR #53 (tool call optimization).

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-authored-by: Claude <[email protected]>
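The behavior the updated test pins down reduces to a counting guard. The sketch below is an assumption inferred from this commit message, not ChatOCIGenAI's actual implementation: the helper name `should_force_tool_choice_none` is hypothetical, while `max_sequential_tool_calls` and `models.ToolChoiceNone` do appear in the real test in the diff below.

```python
# Illustrative sketch only -- not ChatOCIGenAI's actual code. Assumes the
# guard simply counts ToolMessages in the conversation history and trips
# once the configured max_sequential_tool_calls limit is reached.
from langchain_core.messages import BaseMessage, ToolMessage


def should_force_tool_choice_none(
    messages: list[BaseMessage], max_sequential_tool_calls: int
) -> bool:
    """True once the history holds as many tool results as the limit, so
    the next request can set tool_choice to ToolChoiceNone and stop the
    model from looping on tool calls."""
    tool_results = sum(1 for m in messages if isinstance(m, ToolMessage))
    return tool_results >= max_sequential_tool_calls
```

With the test's limit of 3 and three ToolMessages in the conversation, this predicate fires, which is what the ToolChoiceNone assertion at the end of the diff verifies.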
1 parent 8b18374 commit a317f22

File tree

1 file changed: +26 −32 lines changed


libs/oci/tests/unit_tests/chat_models/test_oci_generative_ai.py

Lines changed: 26 additions & 32 deletions
@@ -771,56 +771,50 @@ def test_get_provider():
 
 @pytest.mark.requires("oci")
 def test_tool_choice_none_after_tool_results() -> None:
-    """Test that tool_choice is set to 'none' when ToolMessages are present.
+    """Test that tool_choice is set to 'none' when max_sequential_tool_calls is exceeded.
 
-    This prevents infinite loops with Meta Llama models that continue calling
-    tools even after receiving results when tools are bound to the model.
+    This prevents infinite loops with Meta Llama models by limiting the number
+    of sequential tool calls.
     """
     from langchain_core.messages import ToolMessage
     from oci.generative_ai_inference import models
 
     oci_gen_ai_client = MagicMock()
     llm = ChatOCIGenAI(
         model_id="meta.llama-3.3-70b-instruct",
-        client=oci_gen_ai_client
+        client=oci_gen_ai_client,
+        max_sequential_tool_calls=3  # Set limit to 3 for testing
     )
 
-    # Mock tools
-    mock_tools = [
-        models.Tool(
-            type="FUNCTION",
-            function=models.FunctionDefinition(
-                name="get_weather",
-                description="Get weather for a city",
-                parameters={}
-            )
-        )
-    ]
+    # Define a simple tool function (following the pattern from other tests)
+    def get_weather(city: str) -> str:
+        """Get weather for a city.
+
+        Args:
+            city: The city to get weather for
+        """
+        return f"Weather in {city}"
 
     # Bind tools to model
-    llm_with_tools = llm.bind_tools(mock_tools)
+    llm_with_tools = llm.bind_tools([get_weather])
 
-    # Create conversation with ToolMessage
+    # Create conversation with 3 ToolMessages (at the limit)
     messages = [
         HumanMessage(content="What's the weather?"),
-        AIMessage(
-            content="",
-            tool_calls=[{
-                "id": "call_123",
-                "name": "get_weather",
-                "args": {"city": "Chicago"}
-            }]
-        ),
-        ToolMessage(
-            content="Sunny, 65°F",
-            tool_call_id="call_123"
-        )
+        AIMessage(content="", tool_calls=[{"id": "call_1", "name": "get_weather", "args": {"city": "Chicago"}}]),
+        ToolMessage(content="Sunny, 65°F", tool_call_id="call_1"),
+        AIMessage(content="", tool_calls=[{"id": "call_2", "name": "get_weather", "args": {"city": "New York"}}]),
+        ToolMessage(content="Rainy, 55°F", tool_call_id="call_2"),
+        AIMessage(content="", tool_calls=[{"id": "call_3", "name": "get_weather", "args": {"city": "Seattle"}}]),
+        ToolMessage(content="Cloudy, 60°F", tool_call_id="call_3")
     ]
 
-    # Prepare the request
-    request = llm_with_tools._prepare_request(messages, stream=False)
+    # Prepare the request - need to pass tools from the bound model kwargs
+    request = llm_with_tools._prepare_request(
+        messages, stop=None, stream=False, **llm_with_tools.kwargs
+    )
 
-    # Verify that tool_choice is set to 'none'
+    # Verify that tool_choice is set to 'none' because limit was reached
     assert hasattr(request.chat_request, 'tool_choice')
     assert isinstance(request.chat_request.tool_choice, models.ToolChoiceNone)
     # Verify tools are still present (not removed, just choice is 'none')
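To check the fix locally, the test can be run on its own. A minimal sketch, assuming pytest and the repository's test dependencies are installed and the command is issued from the repo root; the node ID simply mirrors the file path shown above.

```python
# Run only the test touched by this commit (assumes pytest is installed
# and the working directory is the repository root).
import pytest

pytest.main([
    "libs/oci/tests/unit_tests/chat_models/test_oci_generative_ai.py"
    "::test_tool_choice_none_after_tool_results",
    "-v",
])
```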

0 commit comments
