Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)
Commit 305ba6f0e6
* Don't force immediate interactive without -i

  Sometimes we might want to use a reverse prompt but still let the model generate tokens right after the initial prompt. So we don't force user input mode if the -i flag wasn't specified and instead let it run until we encounter the reverse prompt. This gives us some more flexibility, since it doesn't force the user to enter a newline if they want to let the model generate text right after the initial prompt and only be asked for input if the reverse prompt is encountered.

  The `--interactive-first` flag is reintroduced to force the old behavior. `-r` behaves like `-i` plus introduces a reverse prompt (it can be specified more than once).

* Update help output.

Co-authored-by: Johnman <tjohnman@github>
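The change can be pictured with a small standalone sketch of the control flow. This is not the repository's actual main.cpp logic: the struct, the fixed token stream, and the helper below are placeholders, and only the field names (interactive, interactive_start, antiprompt) mirror the gpt_params struct declared in the header further down.

// Minimal sketch of the flag behavior described in the commit message.
// Everything here is illustrative; real token sampling is replaced by a
// fixed list of fake tokens.
#include <iostream>
#include <string>
#include <vector>

struct interactive_opts {
    bool interactive       = false; // set by -i or -r
    bool interactive_start = false; // set by --interactive-first
    std::vector<std::string> antiprompt; // reverse prompts collected from -r (repeatable)
};

// Returns true if the generated text currently ends with any reverse prompt.
static bool hit_reverse_prompt(const std::string & text,
                               const std::vector<std::string> & antiprompt) {
    for (const std::string & ap : antiprompt) {
        if (text.size() >= ap.size() &&
            text.compare(text.size() - ap.size(), ap.size(), ap) == 0) {
            return true;
        }
    }
    return false;
}

int main() {
    interactive_opts opts;
    opts.interactive = true;          // as if -r was given
    opts.antiprompt  = { "User:" };   // -r "User:"
    // opts.interactive_start = true; // uncomment to model --interactive-first

    // Only --interactive-first asks for input before any generation happens.
    if (opts.interactive_start) {
        std::cout << "[asking for user input before any generation]\n";
    }

    // Stand-in for the generation loop: a fixed token stream instead of sampling.
    const std::vector<std::string> fake_tokens = { "Hello", ",", " how can I help?", "\n", "User:" };
    std::string generated;
    bool wait_for_input = false;

    for (const std::string & tok : fake_tokens) {
        generated += tok;
        std::cout << tok << std::flush;

        // Without --interactive-first, generation runs freely until a reverse
        // prompt shows up in the output; only then is the user asked for input.
        if (opts.interactive && hit_reverse_prompt(generated, opts.antiprompt)) {
            wait_for_input = true;
            break;
        }
    }

    if (wait_for_input) {
        std::cout << "\n[reverse prompt hit, control returned to the user]\n";
    }
    return 0;
}

In practice this means `-r "User:"` alone lets the model answer the initial prompt immediately, while adding `--interactive-first` restores the old behavior of waiting for the user before generating anything.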
		
			
				
	
	
		
58 lines, 1.9 KiB, C++
	
	
	
	
	
			
		
		
	
	
// Various helper functions and utilities

#pragma once

#include "llama.h"

#include <string>
#include <vector>
#include <random>
#include <thread>

//
// CLI argument parsing
//

struct gpt_params {
    int32_t seed          = -1;  // RNG seed
    int32_t n_threads     = std::min(4, (int32_t) std::thread::hardware_concurrency());
    int32_t n_predict     = 128; // new tokens to predict
    int32_t repeat_last_n = 64;  // last n tokens to penalize
    int32_t n_parts       = -1;  // amount of model parts (-1 = determine from model dimensions)
    int32_t n_ctx         = 512; // context size

    // sampling parameters
    int32_t top_k = 40;
    float   top_p = 0.95f;
    float   temp  = 0.80f;
    float   repeat_penalty  = 1.10f;

    int32_t n_batch = 8; // batch size for prompt processing

    std::string model  = "models/lamma-7B/ggml-model.bin"; // model path
    std::string prompt = "";

    std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted

    bool memory_f16        = false; // use f16 instead of f32 for memory kv
    bool random_prompt     = false; // do not randomize prompt if none provided
    bool use_color         = false; // use color to distinguish generations and inputs
    bool interactive       = false; // interactive mode
    bool interactive_start = false; // wait for user input immediately
    bool instruct          = false; // instruction mode (used for Alpaca models)
    bool ignore_eos        = false; // do not stop generating after eos
    bool perplexity        = false; // compute perplexity over the prompt
};

bool gpt_params_parse(int argc, char ** argv, gpt_params & params);

void gpt_print_usage(int argc, char ** argv, const gpt_params & params);

std::string gpt_random_prompt(std::mt19937 & rng);

//
// Vocab utils
//

std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos);
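
For orientation, a minimal hypothetical caller of this header might look like the sketch below. The gpt_params_parse, gpt_print_usage, and llama_tokenize calls are the declarations above; the llama_init_from_file, llama_context_default_params, and llama_free calls and the header filename are assumptions about the llama.h API of this period rather than something shown in this file.

// Hypothetical caller of the declarations above, shown only to illustrate how
// they fit together; it is not part of the repository.
#include "utils.h" // this header; the filename is an assumption

#include <cstdio>

int main(int argc, char ** argv) {
    gpt_params params;
    if (!gpt_params_parse(argc, argv, params)) {
        gpt_print_usage(argc, argv, params);
        return 1;
    }

    // Load the model into a context (context parameters left at their
    // defaults; signatures assumed from llama.h of this era).
    llama_context * ctx = llama_init_from_file(params.model.c_str(),
                                               llama_context_default_params());
    if (ctx == nullptr) {
        fprintf(stderr, "failed to load model: %s\n", params.model.c_str());
        return 1;
    }

    // Tokenize the prompt, prepending a BOS token as the helper allows.
    std::vector<llama_token> tokens = llama_tokenize(ctx, params.prompt, true);
    printf("prompt tokenized into %zu tokens\n", tokens.size());

    // The evaluation/sampling loop would go here.

    llama_free(ctx);
    return 0;
}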