move cache stack to advance stack
@@ -35,10 +35,11 @@ static bool match_string(const std::string & input, llama_grammar * grammar) {
     const llama_grammar_rules  & rules      = llama_grammar_get_rules (grammar);
     llama_grammar_stacks       & stacks_cur = llama_grammar_get_stacks(grammar);
 
+    llama_grammar_stacks_cache stacks_cache;
     for (const auto & cpt : cpts) {
         const llama_grammar_stacks stacks_prev = llama_grammar_get_stacks(grammar); // copy
 
-        llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur);
+        llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur, stacks_cache);
 
         if (stacks_cur.empty()) {
             // no stacks means that the grammar failed to match at this point
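For context, the cache that this hunk threads through llama_grammar_accept lets repeated stack advances be computed once and reused across the per-codepoint loop in match_string. The standalone C++ sketch below illustrates that memoization pattern with simplified stand-in types; stack_t, stacks_t, cache_t, advance_one, accept_cpt and the (stack, codepoint) cache key are illustrative assumptions, not the actual llama_grammar_stacks_cache definition or the real llama.cpp logic.

    // Standalone sketch of the memoization pattern: the caller owns a cache that
    // outlives the per-codepoint loop, so an identical (stack, codepoint) advance
    // is computed only once. Types are simplified stand-ins, not llama.cpp's.
    #include <cstdint>
    #include <map>
    #include <utility>
    #include <vector>

    using stack_t  = std::vector<int>;      // stand-in for one grammar stack
    using stacks_t = std::vector<stack_t>;  // set of live stacks
    using cache_t  = std::map<std::pair<stack_t, uint32_t>, stacks_t>;

    // stand-in for advancing a single stack by one codepoint (real grammar logic elided)
    static stacks_t advance_one(const stack_t & stack, uint32_t cpt) {
        (void) cpt;
        return { stack };
    }

    // rough analogue of llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur, stacks_cache)
    static void accept_cpt(const stacks_t & stacks_prev, uint32_t cpt,
                           stacks_t & stacks_cur, cache_t & cache) {
        stacks_cur.clear();
        for (const auto & stack : stacks_prev) {
            auto key = std::make_pair(stack, cpt);
            auto it  = cache.find(key);
            if (it == cache.end()) {
                it = cache.emplace(key, advance_one(stack, cpt)).first; // compute once, remember
            }
            stacks_cur.insert(stacks_cur.end(), it->second.begin(), it->second.end());
        }
    }

    int main() {
        stacks_t stacks_cur = { {} };   // start with one empty stack
        cache_t  stacks_cache;          // declared once, outside the loop, as in the hunk
        for (uint32_t cpt : { 0x61u, 0x62u, 0x61u }) {  // 'a', 'b', 'a' (second 'a' hits the cache)
            const stacks_t stacks_prev = stacks_cur;    // copy, as in the hunk
            accept_cpt(stacks_prev, cpt, stacks_cur, stacks_cache);
            if (stacks_cur.empty()) {
                return 1;  // no stacks means the grammar failed to match at this point
            }
        }
        return 0;
    }

Declaring the cache once outside the loop (rather than per call) is what allows later codepoints to benefit from earlier work, which is the point of passing stacks_cache as an extra argument in the new signature.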