mirror of https://github.com/ggml-org/llama.cpp.git
move cache stack to advance stack
@@ -15,10 +15,11 @@ static bool llama_grammar_validate(struct llama_grammar * grammar, const std::st
           llama_grammar_stacks & stacks_cur = llama_grammar_get_stacks(grammar);
 
     size_t pos = 0;
+    llama_grammar_stacks_cache stacks_cache;
     for (const auto & cpt : cpts) {
         const llama_grammar_stacks stacks_prev = llama_grammar_get_stacks(grammar); // copy
 
-        llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur);
+        llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur, stacks_cache);
 
         if (stacks_cur.empty()) {
             error_pos = pos;
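The new stacks_cache is declared once before the validation loop and threaded through llama_grammar_accept, so state built while advancing stacks can be reused across codepoints. Below is a minimal sketch of that memoization pattern; the key type, the expand_stack helper, and the map-based layout are assumptions for illustration only, not the actual llama_grammar_stacks_cache definition in llama.cpp.

// Hypothetical sketch (illustrative names, not the real llama_grammar_stacks_cache):
// memoize the expansion of a grammar stack so repeated stacks are not recomputed.
#include <map>
#include <vector>

using grammar_stack  = std::vector<int>;             // stand-in for a stack of rule positions
using grammar_stacks = std::vector<grammar_stack>;
using stacks_cache   = std::map<grammar_stack, grammar_stacks>;

// Placeholder expansion; the real code would walk the grammar rules here.
static grammar_stacks expand_stack(const grammar_stack & stack) {
    return { stack };
}

// Return the cached expansion, computing and storing it on a miss.
static const grammar_stacks & expand_cached(const grammar_stack & stack, stacks_cache & cache) {
    auto it = cache.find(stack);
    if (it == cache.end()) {
        it = cache.emplace(stack, expand_stack(stack)).first;
    }
    return it->second;
}

int main() {
    stacks_cache cache;                               // created once, like stacks_cache in the diff above
    const grammar_stack stack = { 1, 2, 3 };
    const grammar_stacks & first  = expand_cached(stack, cache);  // miss: computed
    const grammar_stacks & second = expand_cached(stack, cache);  // hit: reused
    return first.size() == second.size() ? 0 : 1;
}

Because the cache outlives each iteration of the loop over codepoints, a stack that recurs while scanning the input is expanded only once.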