
Commit 633079c

Fix the Ollama FIM completion

We were not passing the suffix at all, which was breaking the FIM completion.

1 parent 6c9b508
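For context, fill-in-the-middle (FIM) completion only works when the model receives both the text before the cursor (the prompt) and the text after it (the suffix). A minimal sketch of what the fixed call amounts to, assuming an `ollama` Python client recent enough to accept `suffix=` on `generate()`; the model name and snippets are illustrative:

```python
# Minimal FIM sketch against the Ollama Python client. The model name
# and the prompt/suffix strings are illustrative; the point is that
# `suffix` (code after the cursor) must be forwarded alongside `prompt`
# (code before the cursor), or the model cannot fill the middle.
import asyncio

from ollama import AsyncClient


async def main() -> None:
    client = AsyncClient()
    response = await client.generate(
        model="codellama:7b-code",       # assumed FIM-capable model
        prompt="def add(a, b):\n    ",   # text before the cursor
        suffix="\n    return result\n",  # text after the cursor
        stream=False,
    )
    print(response["response"])


asyncio.run(main())
```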

File tree

1 file changed: +13 −2 lines changed


src/codegate/providers/ollama/completion_handler.py

Lines changed: 13 additions & 2 deletions
```diff
@@ -89,9 +89,20 @@ async def execute_completion(
         """Stream response directly from Ollama API."""
         self.base_tool = base_tool
         if is_fim_request:
-            prompt = request["messages"][0].get("content", "")
+            prompt = ""
+            for i in reversed(range(len(request["messages"]))):
+                if request["messages"][i]["role"] == "user":
+                    prompt = request["messages"][i]["content"]  # type: ignore
+                    break
+            if not prompt:
+                raise ValueError("No user message found in FIM request")
+
             response = await self.client.generate(
-                model=request["model"], prompt=prompt, stream=stream, options=request["options"]  # type: ignore
+                model=request["model"],
+                prompt=prompt,
+                suffix=request.get("suffix", ""),
+                stream=stream,
+                options=request["options"]  # type: ignore
             )
         else:
             response = await self.client.chat(
```
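The other half of the change drops the assumption that the FIM prompt lives in the first message and instead scans for the most recent user message. Pulled out of the handler, the selection logic looks roughly like this; the `last_user_prompt` helper name is ours, not part of the codebase:

```python
# Sketch of the prompt-selection logic from the diff above: walk the
# messages newest-to-oldest, take the first user message, and fail
# loudly if none carries content. Messages are plain dicts with
# "role" and "content" keys, as in the handler.
def last_user_prompt(messages: list[dict]) -> str:
    prompt = ""
    for message in reversed(messages):
        if message.get("role") == "user":
            prompt = message.get("content", "")
            break
    if not prompt:
        raise ValueError("No user message found in FIM request")
    return prompt


assert last_user_prompt(
    [
        {"role": "system", "content": "Complete the code."},
        {"role": "user", "content": "def fib(n):"},
    ]
) == "def fib(n):"
```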

0 commit comments