#include "ggml.h"
 | 
						|
#include "llama.h"
 | 
						|
#include "common.h"
 | 
						|
#include "ngram-cache.h"
 | 
						|
 | 
						|
#include <cstdint>
 | 
						|
#include <fstream>
 | 
						|
#include <iostream>
 | 
						|
#include <string>
 | 
						|
#include <unordered_map>
 | 
						|
#include <vector>
 | 
						|
 | 
						|
int main(int argc, char ** argv){
 | 
						|
    gpt_params params;
 | 
						|
 | 
						|
    auto options = gpt_params_parser_init(params, LLAMA_EXAMPLE_COMMON);
 | 
						|
    if (!gpt_params_parse(argc, argv, params, options)) {
 | 
						|
        return 1;
 | 
						|
    }
 | 
						|
 | 
						|
    // init llama.cpp
 | 
						|
    llama_backend_init();
 | 
						|
    llama_numa_init(params.numa);
 | 
						|
 | 
						|
    // load the model
 | 
						|
    llama_init_result llama_init = llama_init_from_gpt_params(params);
 | 
						|
 | 
						|
    llama_model * model = llama_init.model;
 | 
						|
    llama_context * ctx = llama_init.context;
 | 
						|
    GGML_ASSERT(model != nullptr);
 | 
						|
 | 
						|
    // tokenize the prompt
 | 
						|
    std::vector<llama_token> inp;
 | 
						|
    inp = ::llama_tokenize(ctx, params.prompt, true, true);
 | 
						|
    fprintf(stderr, "%s: tokenization done\n", __func__);
 | 
						|
 | 
						|
 | 
						|
    llama_ngram_cache ngram_cache;
 | 
						|
    llama_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, inp.size(), true);
 | 
						|
    fprintf(stderr, "%s: hashing done, writing file to %s\n", __func__, params.lookup_cache_static.c_str());
 | 
						|
 | 
						|
    llama_ngram_cache_save(ngram_cache, params.lookup_cache_static);
 | 
						|
}
 |
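
// Usage sketch: a hedged example invocation, assuming the standard llama.cpp
// common options -m/--model, -f/--file (prompt text from a file) and
// --lookup-cache-static; the built binary name may differ between builds.
//
//   ./llama-lookup-create -m model.gguf -f corpus.txt --lookup-cache-static lookup.bin
//
// The resulting cache file can then be passed to the lookup-decoding examples
// via the same --lookup-cache-static parameter.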