commit afa8a9ec9b
* llama : functions -> methods (#11110)
* llama : add struct llama_vocab to the API (#11156)
* hparams : move vocab params to llama_vocab (#11159)
* vocab : more pimpl (#11165)
* vocab : minor tokenization optimizations (#11160)
* lora : update API names (#11167)
* llama : update API names to use correct prefix (#11174)
* vocab : llama_vocab_add_[be]os -> llama_vocab_get_add_[be]os (#11174)
* vocab : llama_vocab_n_vocab -> llama_vocab_n_tokens (#11174)

Co-authored-by: Diego Devesa <slarengh@gmail.com>
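In practice, these renames mean vocab queries go through a const llama_vocab handle obtained from the model, rather than through the model or context directly. A minimal sketch of the post-rename calls, assuming the llama.h declarations that accompany #11156/#11174 (error handling trimmed):

#include <cstdio>
#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_backend_init();

    auto * model = llama_model_load_from_file(argv[1], llama_model_default_params());
    if (model == nullptr) {
        llama_backend_free();
        return 1;
    }

    // vocab queries now go through the llama_vocab handle instead of the model
    const llama_vocab * vocab = llama_model_get_vocab(model);

    // llama_vocab_n_vocab -> llama_vocab_n_tokens (#11174)
    printf("n_tokens : %d\n", llama_vocab_n_tokens(vocab));

    // llama_vocab_add_bos -> llama_vocab_get_add_bos (#11174)
    printf("add_bos  : %d\n", llama_vocab_get_add_bos(vocab));

    llama_model_free(model);
    llama_backend_free();
    return 0;
}

The vocab handle is owned by the model, so there is no separate free call for it; releasing the model and backend is enough.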
		
			
				
	
	
		
25 lines · 712 B · C++
// ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763

#include <cstdio>
#include <string>
#include <thread>

#include "llama.h"
#include "get-model.h"

// This creates a new context inside a pthread and then tries to exit cleanly.
int main(int argc, char ** argv) {
    auto * model_path = get_model_or_exit(argc, argv);

    std::thread([&model_path]() {
        llama_backend_init();
        auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
        auto * ctx = llama_init_from_model(model, llama_context_default_params());
        llama_free(ctx);
        llama_model_free(model);
        llama_backend_free();
    }).join();

    return 0;
}
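As its header comment says, the test runs the whole backend lifecycle, llama_backend_init through llama_backend_free, inside a short-lived worker thread and checks that the process still exits cleanly, the scenario from the linked issue. Note that it already uses the entry points renamed in this commit: llama_model_load_from_file, llama_init_from_model and llama_model_free.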