Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-28 08:31:25 +00:00)
	common : print that one line of the syntax help *also* to standard output (#3823)
Author: Henk Poley
@@ -743,7 +743,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
 #endif // GGML_USE_CUBLAS
 #endif
     printf("  --verbose-prompt      print prompt before generation\n");
-    fprintf(stderr, "  --simple-io           use basic IO for better compatibility in subprocesses and limited consoles\n");
+    printf("  --simple-io           use basic IO for better compatibility in subprocesses and limited consoles\n");
     printf("  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
     printf("  --lora-scaled FNAME S apply LoRA adapter with user defined scaling S (implies --no-mmap)\n");
     printf("  --lora-base FNAME     optional model to use as a base for the layers modified by the LoRA adapter\n");
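A minimal sketch, not part of the commit, of why the change matters: anything written to stderr is not captured when stdout is piped or redirected, so before this change a `--help` listing filtered through a pipe or saved to a file would contain every option except --simple-io. The file name sketch.cpp and the build command are assumptions for illustration only.

// sketch.cpp — illustration only; plain C stdio, no llama.cpp code (assumption).
// Build: g++ sketch.cpp -o sketch
#include <cstdio>

int main() {
    // Before the commit: the --simple-io help line went to stderr,
    // so `./sketch > help.txt` leaves it out of help.txt (it lands on the terminal).
    fprintf(stderr, "  --simple-io           use basic IO for better compatibility in subprocesses and limited consoles\n");
    // After the commit: the same line goes to stdout like the rest of the usage text,
    // so pipes and redirection capture it along with every other option.
    printf("  --simple-io           use basic IO for better compatibility in subprocesses and limited consoles\n");
    return 0;
}

Running `./sketch > help.txt 2>/dev/null` captures only the printf line in help.txt; dropping the `2>/dev/null` shows the fprintf line on the terminal instead of in the file.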