Skip to content

Commit 83af651

Browse files
committed
Enhance tool calling to support multi-step orchestration
This commit improves the Meta Llama tool-calling infinite-loop fix by replacing the overly restrictive single-tool limitation with intelligent multi-step support that allows up to 8 sequential tool calls.

Changes:
- Add a max_sequential_tool_calls parameter (default: 8) to OCIGenAIBase
- Implement a loop-detection algorithm that identifies when the same tool is called repeatedly with identical arguments
- Replace the unconditional tool_choice="none" with conditional logic that only forces a stop when the limit is exceeded or a loop is detected
- Otherwise allow default model behavior for continued tool calling

Benefits:
- Prevents infinite loops (original goal maintained)
- Enables multi-step tool orchestration (new capability)
- Fully backward compatible via the default parameter value
- Configurable per use case
- Domain-agnostic implementation

Tested with integration tests showing successful multi-step workflows for diagnostic and remediation scenarios.
1 parent a83c749 commit 83af651

File tree

2 files changed

+65
-4
lines changed

2 files changed

+65
-4
lines changed

libs/oci/langchain_oci/chat_models/oci_generative_ai.py

Lines changed: 61 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -747,14 +747,71 @@ def messages_to_oci_params(
747747
"api_format": self.chat_api_format,
748748
}
749749

750-
# BUGFIX: If tool results have been received and tools are bound,
751-
# set tool_choice to "none" to prevent the model from making
752-
# additional tool calls in a loop.
750+
# BUGFIX: Intelligently manage tool_choice to prevent infinite loops
751+
# while allowing legitimate multi-step tool orchestration.
753752
# This addresses a known issue with Meta Llama models that
754753
# continue calling tools even after receiving results.
754+
755+
def _should_allow_more_tool_calls(
    messages: List[BaseMessage],
    max_tool_calls: int
) -> bool:
    """
    Determine if the model should be allowed to call more tools.

    Returns False (force stop) if:
    - Tool call limit exceeded
    - Infinite loop detected (same tool called repeatedly with same args)

    Returns True otherwise to allow multi-step tool orchestration.

    Args:
        messages: Conversation history
        max_tool_calls: Maximum number of tool calls before forcing stop
    """
    # Count total tool calls made so far; each ToolMessage in the
    # history corresponds to one completed tool invocation.
    tool_call_count = sum(
        1 for msg in messages
        if isinstance(msg, ToolMessage)
    )

    # Safety limit: prevent runaway tool calling
    if tool_call_count >= max_tool_calls:
        return False

    # Detect infinite loop: same tool called with same arguments in
    # succession. Walk the history newest-first so we only ever look
    # at the most recent tool-call attempts.
    recent_calls = []
    for msg in reversed(messages):
        if hasattr(msg, 'tool_calls') and msg.tool_calls:
            for tc in msg.tool_calls:
                # Create signature: (tool_name, canonical JSON of args)
                try:
                    args_str = json.dumps(tc.get('args', {}), sort_keys=True)
                except (TypeError, ValueError):
                    # BUGFIX: narrowed from a bare ``except Exception:
                    # pass`` that swallowed *any* error in the loop
                    # body. Only JSON-serialization failures are
                    # expected here; skip the unserializable call
                    # (same conservative behavior as before) instead
                    # of masking unrelated bugs.
                    continue
                signature = (tc.get('name', ''), args_str)

                # Check if this exact call was made in last 2 calls
                if signature in recent_calls[-2:]:
                    return False  # Infinite loop detected

                recent_calls.append(signature)

        # Only check last 4 AI messages (last 4 tool call attempts)
        if len(recent_calls) >= 4:
            break

    return True
806+
755807
has_tool_results = any(isinstance(msg, ToolMessage) for msg in messages)
756808
if has_tool_results and "tools" in kwargs and "tool_choice" not in kwargs:
757-
result["tool_choice"] = self.oci_tool_choice_none()
809+
if not _should_allow_more_tool_calls(
810+
messages, self.max_sequential_tool_calls
811+
):
812+
# Force model to stop and provide final answer
813+
result["tool_choice"] = self.oci_tool_choice_none()
814+
# else: Allow model to decide (default behavior)
758815

759816
return result
760817

libs/oci/langchain_oci/llms/oci_generative_ai.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -116,6 +116,10 @@ class OCIGenAIBase(BaseModel, ABC):
116116
is_stream: bool = False
117117
"""Whether to stream back partial progress"""
118118

119+
max_sequential_tool_calls: int = 8
120+
"""Maximum tool calls before forcing final answer.
121+
Prevents infinite loops while allowing multi-step orchestration."""
122+
119123
model_config = ConfigDict(
120124
extra="forbid", arbitrary_types_allowed=True, protected_namespaces=()
121125
)

0 commit comments

Comments (0)