mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-29 08:41:22 +00:00 
			
		
		
		
	 0cc63754b8
			
		
	
	0cc63754b8
	
	
	
		
			
			It's like simple-chat, but it uses smart pointers to avoid manual memory cleanup, so there are fewer memory leaks in the code now. Avoid printing multiple dots. Split code into smaller functions. Uses no exception handling. Signed-off-by: Eric Curtin <ecurtin@redhat.com>
		
			
				
	
	
		
			26 lines
		
	
	
		
			676 B
		
	
	
	
		
			C++
		
	
	
	
	
	
			
		
		
	
	
			26 lines
		
	
	
		
			676 B
		
	
	
	
		
			C++
		
	
	
	
	
	
| #pragma once
 | |
| 
 | |
| #ifndef __cplusplus
 | |
| #error "This header is for C++ only"
 | |
| #endif
 | |
| 
 | |
| #include <memory>
 | |
| 
 | |
| #include "llama.h"
 | |
| 
 | |
| struct llama_model_deleter {
 | |
|     void operator()(llama_model * model) { llama_free_model(model); }
 | |
| };
 | |
| 
 | |
| struct llama_context_deleter {
 | |
|     void operator()(llama_context * context) { llama_free(context); }
 | |
| };
 | |
| 
 | |
| struct llama_sampler_deleter {
 | |
|     void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); }
 | |
| };
 | |
| 
 | |
| typedef std::unique_ptr<llama_model, llama_model_deleter> llama_model_ptr;
 | |
| typedef std::unique_ptr<llama_context, llama_context_deleter> llama_context_ptr;
 | |
| typedef std::unique_ptr<llama_sampler, llama_sampler_deleter> llama_sampler_ptr;
 |