-
Notifications
You must be signed in to change notification settings - Fork 12k
llama : adds llama-grammar memoization stacks (#4218) #9833
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Open
clarismiranda
wants to merge
7
commits into
ggml-org:master
Choose a base branch
from
clarismiranda:grammar-memo
base: master
Could not load branches
Branch not found: {{ refName }}
Loading
Could not load tags
Nothing to show
Loading
Are you sure you want to change the base?
Some commits from the old base branch may be removed from the timeline,
and old review comments may become outdated.
Open
Changes from 1 commit
Commits
Show all changes
7 commits
Select commit
Hold shift + click to select a range
cb1632b
llama : adds llama-grammar memoization stacks (#4218)
clarismiranda 901a347
move cache stack to advance stack
clarismiranda 2aa6dd2
add stacks cache into llama_grammar
clarismiranda 17b3a3e
llama : minor llama_grammar refactoring
ggerganov 34fc44d
Merge pull request #1 from ggerganov/gg/grammar-refactor
clarismiranda a33fbbe
Update spelling in memoize
clarismiranda dc68a59
update spelling
clarismiranda File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change | ||||||||
---|---|---|---|---|---|---|---|---|---|---|
|
@@ -682,6 +682,114 @@ static bool llama_grammar_match_partial_char( | |||||||||
return !is_positive_char; | ||||||||||
} | ||||||||||
|
||||||||||
// transforms a grammar pushdown stack into N possible stacks, all ending
// at a character range (terminal element)
// additionally memoizes the stack to its possible stacks by mapping
// < llama_grammar_stack, llama_grammar_stacks >
|
||||||||||
struct VectorPointerHash { | ||||||||||
size_t operator()(const llama_grammar_stack & v) const { | ||||||||||
size_t seed = v.size(); | ||||||||||
for (const auto* ptr : v) { | ||||||||||
seed ^= std::hash<const llama_grammar_element*>()(ptr) + 0x9e3779b9 + (seed << 6) + (seed >> 2); | ||||||||||
} | ||||||||||
return seed; | ||||||||||
} | ||||||||||
}; | ||||||||||
|
||||||||||
// Memoization cache: maps a grammar stack to the set of expanded stacks
// (all ending at a terminal element) that advancing it produces, so that
// repeated expansions of the same stack are not recomputed.
// NOTE(review): this is a translation-unit-level mutable global shared by
// every grammar instance; it is cleared in llama_grammar_init_impl, so
// creating a grammar invalidates another grammar's cached entries and
// concurrent use from multiple threads is presumably unsafe — confirm
// the intended thread-safety/ownership model.
static std::unordered_map<
    llama_grammar_stack,
    llama_grammar_stacks,
    VectorPointerHash>
    llama_grammar_stacks_cache = {};
|
||||||||||
ngxson marked this conversation as resolved.
Show resolved
Hide resolved
|
||||||||||
// Forward declaration: the memoized advance wrapper is mutually recursive
// with llama_grammar_advance_stack_memo_impl below (the impl recurses
// through the wrapper so nested expansions also hit the cache).
static void llama_grammar_advance_stack_memo(
        const llama_grammar_rules & rules,
        const llama_grammar_stack & stack,
        llama_grammar_stacks & new_stacks);
|
||||||||||
static void llama_grammar_advance_stack_memo_impl( | ||||||||||
const llama_grammar_rules & rules, | ||||||||||
const llama_grammar_stack & stack, | ||||||||||
llama_grammar_stacks & new_stacks) { | ||||||||||
if (stack.empty()) { | ||||||||||
if (std::find(new_stacks.begin(), new_stacks.end(), stack) == new_stacks.end()) { | ||||||||||
new_stacks.emplace_back(stack); | ||||||||||
} | ||||||||||
return; | ||||||||||
} | ||||||||||
|
||||||||||
const llama_grammar_element * pos = stack.back(); | ||||||||||
|
||||||||||
switch (pos->type) { | ||||||||||
case LLAMA_GRETYPE_RULE_REF: { | ||||||||||
const size_t rule_id = static_cast<size_t>(pos->value); | ||||||||||
const llama_grammar_element * subpos = rules[rule_id].data(); | ||||||||||
do { | ||||||||||
// init new stack without the top (pos) | ||||||||||
llama_grammar_stack new_stack(stack.begin(), stack.end() - 1); | ||||||||||
if (!llama_grammar_is_end_of_sequence(pos + 1)) { | ||||||||||
// if this rule ref is followed by another element, add that to stack | ||||||||||
new_stack.push_back(pos + 1); | ||||||||||
} | ||||||||||
if (!llama_grammar_is_end_of_sequence(subpos)) { | ||||||||||
// if alternate is nonempty, add to stack | ||||||||||
new_stack.push_back(subpos); | ||||||||||
} | ||||||||||
llama_grammar_advance_stack_memo(rules, new_stack, new_stacks); | ||||||||||
while (!llama_grammar_is_end_of_sequence(subpos)) { | ||||||||||
// scan to end of alternate def | ||||||||||
subpos++; | ||||||||||
} | ||||||||||
if (subpos->type == LLAMA_GRETYPE_ALT) { | ||||||||||
// there's another alternate def of this rule to process | ||||||||||
subpos++; | ||||||||||
} else { | ||||||||||
break; | ||||||||||
} | ||||||||||
} while (true); | ||||||||||
break; | ||||||||||
} | ||||||||||
case LLAMA_GRETYPE_CHAR: | ||||||||||
case LLAMA_GRETYPE_CHAR_NOT: | ||||||||||
case LLAMA_GRETYPE_CHAR_ANY: | ||||||||||
if (std::find(new_stacks.begin(), new_stacks.end(), stack) == new_stacks.end()) { | ||||||||||
// only add the stack if it's not a duplicate of one we already have | ||||||||||
new_stacks.emplace_back(stack); | ||||||||||
} | ||||||||||
break; | ||||||||||
default: | ||||||||||
// end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range | ||||||||||
// (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on | ||||||||||
// those | ||||||||||
GGML_ABORT("fatal error"); | ||||||||||
} | ||||||||||
} | ||||||||||
|
||||||||||
static void llama_grammar_advance_stack_memo( | ||||||||||
const llama_grammar_rules & rules, | ||||||||||
const llama_grammar_stack & stack, | ||||||||||
llama_grammar_stacks & new_stacks) { | ||||||||||
|
||||||||||
llama_grammar_stacks advanced_stacks; | ||||||||||
// Look if stack is already in memory | ||||||||||
auto it = llama_grammar_stacks_cache.find(stack); | ||||||||||
if (it != llama_grammar_stacks_cache.end()) { | ||||||||||
advanced_stacks = it->second; | ||||||||||
} else { | ||||||||||
// Advance stacks with memorization | ||||||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Suggested change
|
||||||||||
llama_grammar_advance_stack_memo_impl(rules, stack, advanced_stacks); | ||||||||||
llama_grammar_stacks_cache.insert(make_pair(stack, advanced_stacks)); | ||||||||||
} | ||||||||||
// Add the advanced stacks to new_stacks avoiding duplicates | ||||||||||
for (const auto & new_stack : advanced_stacks) { | ||||||||||
if (std::find(new_stacks.begin(), new_stacks.end(), new_stack) == new_stacks.end()) { | ||||||||||
new_stacks.emplace_back(new_stack); | ||||||||||
} | ||||||||||
} | ||||||||||
|
||||||||||
} | ||||||||||
|
||||||||||
// transforms a grammar pushdown stack into N possible stacks, all ending | ||||||||||
// at a character range (terminal element) | ||||||||||
static void llama_grammar_advance_stack( | ||||||||||
|
@@ -844,7 +952,7 @@ void llama_grammar_accept( | |||||||||
if (!llama_grammar_is_end_of_sequence(pos)) { | ||||||||||
new_stack.push_back(pos); | ||||||||||
} | ||||||||||
llama_grammar_advance_stack(rules, new_stack, stacks_new); | ||||||||||
llama_grammar_advance_stack_memo(rules, new_stack, stacks_new); | ||||||||||
} | ||||||||||
} | ||||||||||
} | ||||||||||
|
@@ -911,6 +1019,8 @@ struct llama_grammar * llama_grammar_init_impl( | |||||||||
const llama_grammar_element ** rules, | ||||||||||
size_t n_rules, | ||||||||||
size_t start_rule_index) { | ||||||||||
// Clear stacks cache | ||||||||||
llama_grammar_stacks_cache.clear(); | ||||||||||
const llama_grammar_element * pos; | ||||||||||
|
||||||||||
// copy rule definitions into vectors | ||||||||||
|
@@ -945,7 +1055,7 @@ struct llama_grammar * llama_grammar_init_impl( | |||||||||
// if alternate is nonempty, add to stack | ||||||||||
stack.push_back(pos); | ||||||||||
} | ||||||||||
llama_grammar_advance_stack(vec_rules, stack, stacks); | ||||||||||
llama_grammar_advance_stack_memo(vec_rules, stack, stacks); | ||||||||||
while (!llama_grammar_is_end_of_sequence(pos)) { | ||||||||||
// scan to end of alternate def | ||||||||||
pos++; | ||||||||||
|
@@ -965,6 +1075,8 @@ struct llama_grammar * llama_grammar_init_impl( | |||||||||
} | ||||||||||
|
||||||||||
struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root) { | ||||||||||
// Clear stacks cache | ||||||||||
llama_grammar_stacks_cache.clear(); | ||||||||||
llama_grammar_parser parser; | ||||||||||
|
||||||||||
// if there is a grammar, parse it | ||||||||||
|
@@ -1023,7 +1135,7 @@ struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab, | |||||||||
// if alternate is nonempty, add to stack | ||||||||||
stack.push_back(pos); | ||||||||||
} | ||||||||||
llama_grammar_advance_stack(vec_rules, stack, stacks); | ||||||||||
llama_grammar_advance_stack_memo(vec_rules, stack, stacks); | ||||||||||
while (!llama_grammar_is_end_of_sequence(pos)) { | ||||||||||
// scan to end of alternate def | ||||||||||
pos++; | ||||||||||
|
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.