mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)
Commit 1442677f92
* common : gpt_params_parse do not print usage
* common : rework usage print (wip)
* common : valign
* common : rework print_usage
* infill : remove cfg support
* common : reorder args
* server : deduplicate parameters
* common : add missing header
* common : remove --random-prompt usages
* examples : migrate to gpt_params
* batched-bench : migrate to gpt_params
* retrieval : migrate to gpt_params
* common : change defaults for escape and n_ctx
* common : remove chatml and instruct params
* common : passkey use gpt_params
44 lines · 1.1 KiB · C++
| #include "ggml.h"
 | |
| #include "llama.h"
 | |
| #include "common.h"
 | |
| #include "ngram-cache.h"
 | |
| 
 | |
| #include <cstdint>
 | |
| #include <fstream>
 | |
| #include <iostream>
 | |
| #include <string>
 | |
| #include <unordered_map>
 | |
| #include <vector>
 | |
| 
 | |
| int main(int argc, char ** argv){
 | |
|     gpt_params params;
 | |
| 
 | |
|     if (!gpt_params_parse(argc, argv, params)) {
 | |
|         gpt_params_print_usage(argc, argv, params);
 | |
|         return 1;
 | |
|     }
 | |
| 
 | |
|     // init llama.cpp
 | |
|     llama_backend_init();
 | |
|     llama_numa_init(params.numa);
 | |
| 
 | |
|     llama_model * model = NULL;
 | |
|     llama_context * ctx = NULL;
 | |
| 
 | |
|     // load the model
 | |
|     std::tie(model, ctx) = llama_init_from_gpt_params(params);
 | |
|     GGML_ASSERT(model != nullptr);
 | |
| 
 | |
|     // tokenize the prompt
 | |
|     std::vector<llama_token> inp;
 | |
|     inp = ::llama_tokenize(ctx, params.prompt, true, true);
 | |
|     fprintf(stderr, "%s: tokenization done\n", __func__);
 | |
| 
 | |
| 
 | |
|     llama_ngram_cache ngram_cache;
 | |
|     llama_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, inp.size(), true);
 | |
|     fprintf(stderr, "%s: hashing done, writing file to %s\n", __func__, params.lookup_cache_static.c_str());
 | |
| 
 | |
|     llama_ngram_cache_save(ngram_cache, params.lookup_cache_static);
 | |
| }
 |
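
For context on how the written file is consumed, here is a minimal sketch (not part of the repository) that reads the static cache back in. It assumes the llama_ngram_cache_load helper declared in common/ngram-cache.h at the time of this commit, and that a missing file surfaces as std::ifstream::failure, as the companion lookup example assumes.

// Illustrative sketch only, not part of llama.cpp: read back a static
// n-gram cache written by lookup-create and report how many entries it holds.
// Assumes llama_ngram_cache_load from common/ngram-cache.h.
#include "ngram-cache.h"

#include <cstdio>
#include <fstream>
#include <string>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <lookup_cache_static_file>\n", argv[0]);
        return 1;
    }

    std::string filename = argv[1];

    llama_ngram_cache ngram_cache;
    try {
        // assumed to throw std::ifstream::failure if the file cannot be opened
        ngram_cache = llama_ngram_cache_load(filename);
    } catch (std::ifstream::failure const &) {
        fprintf(stderr, "error: failed to open static lookup cache %s\n", filename.c_str());
        return 1;
    }

    // each entry maps a fixed-size n-gram (LLAMA_NGRAM_STATIC tokens) to the
    // empirical counts of the tokens that followed it in the source text
    fprintf(stderr, "loaded %zu n-gram entries from %s\n", ngram_cache.size(), filename.c_str());
    return 0;
}

Because the static cache depends only on the text in params.prompt, it can be built once with lookup-create and reused across runs via params.lookup_cache_static, while the context and dynamic caches used by the lookup example are rebuilt at generation time.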