1 parent e8c9873 · commit 2eba424
mini_agent/llm/openai_client.py
@@ -65,12 +65,8 @@ async def _make_api_request(
         params = {
             "model": self.model,
             "messages": api_messages,
-            # Add consistent parameters for better KV cache stability
-            "temperature": 0.7,  # Consistent temperature
-            "seed": 42,  # Fixed seed for deterministic behavior and better caching
             # Enable reasoning_split to separate thinking content
-            # NOTE: Commenting out for local models - may cause cache invalidation
-            # "extra_body": {"reasoning_split": True},
+            "extra_body": {"reasoning_split": True},
         }
 
         if tools:
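
For reference, below is a minimal sketch of how a params dict like the one in this diff could be forwarded to an OpenAI-compatible endpoint. It is not the repository's actual implementation: the client construction, base_url, model name, and message contents are illustrative assumptions; only the use of extra_body to pass the non-standard reasoning_split field mirrors the change above.

# Sketch only: shows passing "extra_body" (fields the OpenAI SDK does not model
# natively, such as reasoning_split) through chat.completions.create.
# The base_url, api_key, model name, and messages here are assumed values.
import asyncio
from openai import AsyncOpenAI

async def make_api_request(client: AsyncOpenAI, model: str, api_messages: list[dict]) -> str:
    params = {
        "model": model,
        "messages": api_messages,
        # Ask the server to return thinking content separately from the answer.
        "extra_body": {"reasoning_split": True},
    }
    response = await client.chat.completions.create(**params)
    return response.choices[0].message.content or ""

async def main() -> None:
    # Assumed local OpenAI-compatible server; no real API key required.
    client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")
    reply = await make_api_request(client, "local-model", [{"role": "user", "content": "Hello"}])
    print(reply)

asyncio.run(main())

Note that extra_body is the SDK's escape hatch for server-specific parameters, which is consistent with the diff re-enabling reasoning_split without adding a first-class SDK argument.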