	Making requested review changes
@@ -223,9 +223,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  --interactive-first   run in interactive mode and wait for input right away\n");
     fprintf(stderr, "  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
     fprintf(stderr, "  -r PROMPT, --reverse-prompt PROMPT\n");
-    fprintf(stderr, "                        specify a PROMPT that will cause generation to stop\n");
-    fprintf(stderr, "                        if running interactive, poll user input upon seeing PROMPT (can be\n");
-    fprintf(stderr, "                        specified more than once for multiple prompts).\n");
+    fprintf(stderr, "                        halt generation at PROMPT, return control in interactive mode\n");
+    fprintf(stderr, "                        (can be specified more than once for multiple prompts).\n");
     fprintf(stderr, "  --color               colorise output to distinguish prompt and user input from generations\n");
     fprintf(stderr, "  -s SEED, --seed SEED  RNG seed (default: -1, use random seed for <= 0)\n");
     fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
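The reworded help text describes the behaviour the flag implements: generation halts whenever the output ends with one of the user-supplied reverse prompts, and in interactive mode control returns to the user. A minimal C++ sketch of such a suffix check (hypothetical helper name, not the actual main.cpp implementation):

#include <string>
#include <vector>

// Hypothetical helper: returns true if `output` ends with any configured
// reverse prompt, i.e. generation should halt and, in interactive mode,
// control should return to the user.
static bool ends_with_reverse_prompt(const std::string & output,
                                     const std::vector<std::string> & antiprompts) {
    for (const std::string & p : antiprompts) {
        if (output.size() >= p.size() &&
            output.compare(output.size() - p.size(), p.size(), p) == 0) {
            return true;
        }
    }
    return false;
}

Since -r can be given more than once, an interactive session might, for example, be started with: ./main -m ./models/7B/ggml-model.bin -i -r "User:" -r "### Instruction:"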