Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
* `main`/`server`: rename to `llama` / `llama-server` for consistency w/ homebrew
* server: update refs -> llama-server
gitignore llama-server
* server: simplify nix package
* main: update refs -> llama
fix examples/main ref
* main/server: fix targets
* update more names
* Update build.yml
* rm accidentally checked in bins
* update straggling refs
* Update .gitignore
* Update server-llm.sh
* main: target name -> llama-cli
* Prefix all example bins w/ llama-
* fix main refs
* rename {main->llama}-cmake-pkg binary
* prefix more cmake targets w/ llama-
* add/fix gbnf-validator subfolder to cmake
* sort cmake example subdirs
* rm bin files
* fix llama-lookup-* Makefile rules
* gitignore /llama-*
* rename Dockerfiles
* rename llama|main -> llama-cli; consistent RPM bin prefixes
* fix some missing -cli suffixes
* rename dockerfile w/ llama-cli
* rename(make): llama-baby-llama
* update dockerfile refs
* more llama-cli(.exe)
* fix test-eval-callback
* rename: llama-cli-cmake-pkg(.exe)
* address gbnf-validator unused fread warning (switched to C++ / ifstream)
* add two missing llama- prefixes
* Updating docs for eval-callback binary to use new `llama-` prefix.
* Updating a few lingering doc references for rename of main to llama-cli
* Updating `run-with-preset.py` to use new binary names.
Updating docs around `perplexity` binary rename.
* Updating documentation references for lookup-merge and export-lora
* Updating two small `main` references missed earlier in the finetune docs.
* Update apps.nix
* update grammar/README.md w/ new llama-* names
* update llama-rpc-server bin name + doc
* Revert "update llama-rpc-server bin name + doc"
This reverts commit e474ef1df4.
* add hot topic notice to README.md
* Update README.md
* Update README.md
* rename gguf-split & quantize bins refs in **/tests.sh
---------
Co-authored-by: HanClinto <hanclinto@gmail.com>

// LLAMA_API_INTERNAL must be defined before including llama.h so that the
// internal grammar machinery used below (struct llama_grammar,
// llama_grammar_accept, decode_utf8, ...) is visible to this translation unit.
#define LLAMA_API_INTERNAL

#include "grammar-parser.h"
#include "ggml.h"
#include "llama.h"
#include "unicode.h"

#include <cstdio>
#include <cstdlib>
#include <sstream>
#include <fstream>
#include <string>
#include <vector>

// Check whether the input string can be generated by the grammar: feed the
// decoded code points into the grammar one at a time and report the position
// of the first one the grammar cannot accept.
static bool llama_sample_grammar_string(struct llama_grammar * grammar, const std::string & input_str, size_t & error_pos, std::string & error_msg) {
    auto decoded = decode_utf8(input_str, {});
    const auto & code_points = decoded.first;

    size_t pos = 0;
    // skip the terminating 0 that decode_utf8 appends to the code point vector
    for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
        auto prev_stacks = grammar->stacks;
        llama_grammar_accept(grammar->rules, prev_stacks, *it, grammar->stacks);
        if (grammar->stacks.empty()) {
            error_pos = pos;
            error_msg = "Unexpected character '" + unicode_cpt_to_utf8(*it) + "'";
            grammar->stacks = prev_stacks;
            return false;
        }
        ++pos;
    }

    // the input is valid only if at least one grammar stack has been fully consumed
    for (const auto & stack : grammar->stacks) {
        if (stack.empty()) {
            return true;
        }
    }

    error_pos = pos;
    error_msg = "Unexpected end of input";
    return false;
}

// Print the offending position, highlighting the failing character and the
// remainder of the input in red via ANSI escape codes.
static void print_error_message(const std::string & input_str, size_t error_pos, const std::string & error_msg) {
    fprintf(stdout, "Input string is invalid according to the grammar.\n");
    fprintf(stdout, "Error: %s at position %zu\n", error_msg.c_str(), error_pos);
    fprintf(stdout, "\n");
    fprintf(stdout, "Input string:\n");
    fprintf(stdout, "%s", input_str.substr(0, error_pos).c_str());
    if (error_pos < input_str.size()) {
        fprintf(stdout, "\033[1;31m%c", input_str[error_pos]);
        if (error_pos+1 < input_str.size()) {
            fprintf(stdout, "\033[0;31m%s", input_str.substr(error_pos+1).c_str());
        }
        fprintf(stdout, "\033[0m\n");
    }
}

int main(int argc, char** argv) {
    if (argc != 3) {
        fprintf(stdout, "Usage: %s <grammar_filename> <input_filename>\n", argv[0]);
        return 1;
    }

    const std::string grammar_filename = argv[1];
    const std::string input_filename = argv[2];

    // Read the GBNF grammar file
    std::string grammar_str;
    {
        std::ifstream grammar_file(grammar_filename);
        if (!grammar_file.is_open()) {
            fprintf(stdout, "Failed to open grammar file: %s\n", grammar_filename.c_str());
            return 1;
        }
        std::stringstream buffer;
        buffer << grammar_file.rdbuf();
        grammar_str = buffer.str();
    }

    // Parse the GBNF grammar
    auto parsed_grammar = grammar_parser::parse(grammar_str.c_str());

    // will be empty (default) if there are parse errors
    if (parsed_grammar.rules.empty()) {
        fprintf(stdout, "%s: failed to parse grammar\n", __func__);
        return 1;
    }

    // Ensure that there is a "root" node.
    if (parsed_grammar.symbol_ids.find("root") == parsed_grammar.symbol_ids.end()) {
        fprintf(stdout, "%s: grammar does not contain a 'root' symbol\n", __func__);
        return 1;
    }

    std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());

    // Create the LLAMA grammar
    auto grammar = llama_grammar_init(
            grammar_rules.data(),
            grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));

    // Read the input file
    std::string input_str;
    {
        std::ifstream input_file(input_filename);
        GGML_ASSERT(input_file.is_open() && "Failed to open input file");
        std::stringstream buffer;
        buffer << input_file.rdbuf();
        input_str = buffer.str();
    }

    // Validate the input string against the grammar
    size_t error_pos;
    std::string error_msg;
    bool is_valid = llama_sample_grammar_string(grammar, input_str, error_pos, error_msg);

    if (is_valid) {
        fprintf(stdout, "Input string is valid according to the grammar.\n");
    } else {
        print_error_message(input_str, error_pos, error_msg);
    }

    // Clean up
    llama_grammar_free(grammar);

    return 0;
}
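For quick experiments the same validation can be exercised without touching the filesystem. The sketch below is a hypothetical helper, not part of the tool: it assumes it lives in this translation unit (so the includes and internal API above apply), and the function name, grammar, and input string are made up for illustration.

// Hypothetical sketch (not part of the tool): validate an in-memory string
// against an in-memory GBNF grammar using the same routines as main() above.
static bool validate_in_memory_example() {
    const std::string grammar_str = "root ::= \"yes\" | \"no\"";  // made-up grammar
    const std::string input_str   = "yes";                        // made-up input

    auto parsed_grammar = grammar_parser::parse(grammar_str.c_str());
    if (parsed_grammar.rules.empty() ||
        parsed_grammar.symbol_ids.find("root") == parsed_grammar.symbol_ids.end()) {
        return false; // parse error or missing "root" symbol
    }

    std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
    struct llama_grammar * grammar = llama_grammar_init(
            grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));

    size_t error_pos;
    std::string error_msg;
    const bool is_valid = llama_sample_grammar_string(grammar, input_str, error_pos, error_msg);

    llama_grammar_free(grammar);
    return is_valid;
}

As in main(), the grammar object has to be released with llama_grammar_free once validation is done.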