1 parent 6aaed11 commit 782b6c2
src/llama.cpp
@@ -21808,8 +21808,11 @@ static int32_t llama_chat_apply_template_internal(
         // IBM Granite template
         for (const auto & message : chat) {
             std::string role(message->role);
-            ss << "<|start_of_role|>" << role << "<|end_of_role|>"
-               << message->content << "<|end_of_text|>\n";
+            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
+            if (role == "assistant_tool_call") {
+                ss << "<|tool_call|>";
+            }
+            ss << message->content << "<|end_of_text|>\n";
         }
         if (add_ass) {
             ss << "<|start_of_role|>assistant<|end_of_role|>\n";
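The change prefixes the content of `assistant_tool_call` turns with the `<|tool_call|>` marker before the usual `<|end_of_text|>` terminator. Below is a minimal standalone sketch of that rendering loop; the `msg` struct and `render_granite` helper are illustrative only and are not part of the llama.cpp API.

```cpp
// Standalone sketch of the IBM Granite template loop shown in the diff above.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct msg {
    std::string role;
    std::string content;
};

static std::string render_granite(const std::vector<msg> & chat, bool add_ass) {
    std::stringstream ss;
    for (const auto & message : chat) {
        ss << "<|start_of_role|>" << message.role << "<|end_of_role|>";
        if (message.role == "assistant_tool_call") {
            // tool-call turns get the dedicated marker before their content
            ss << "<|tool_call|>";
        }
        ss << message.content << "<|end_of_text|>\n";
    }
    if (add_ass) {
        // open an assistant turn for the model to continue
        ss << "<|start_of_role|>assistant<|end_of_role|>\n";
    }
    return ss.str();
}

int main() {
    std::vector<msg> chat = {
        {"user",                "What is the weather in Boston?"},
        {"assistant_tool_call", "{\"name\": \"get_weather\", \"arguments\": {\"city\": \"Boston\"}}"},
    };
    std::cout << render_granite(chat, /*add_ass=*/true);
}
```

With these inputs, the tool-call turn renders as `<|start_of_role|>assistant_tool_call<|end_of_role|><|tool_call|>{...}<|end_of_text|>`, followed by an opened assistant turn, which is the behavior this commit introduces.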