Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-10-31 08:51:55 +00:00
	finetune : add --n-gpu-layers flag info to --help (#4128)
@@ -1288,6 +1288,7 @@ static void train_print_usage(int argc, char ** argv, const struct train_params
    fprintf(stderr, "  --model-base FNAME         model path from which to load base model (default '%s')\n", params->fn_model_base);
    fprintf(stderr, "  --lora-out FNAME           path to save llama lora (default '%s')\n", params->fn_lora_out);
    fprintf(stderr, "  --only-write-lora          only save llama lora, don't do any training.  use this if you only want to convert a checkpoint to a lora adapter.\n");
+   fprintf(stderr, "  --n-gpu-layers N           Number of model layers to offload to GPU (default 0).\n");
    fprintf(stderr, "  --norm-rms-eps F           RMS-Norm epsilon value (default %f)\n", params->f_norm_rms_eps);
    fprintf(stderr, "  --rope-freq-base F         Frequency base for ROPE (default %f)\n", params->rope_freq_base);
    fprintf(stderr, "  --rope-freq-scale F        Frequency scale for ROPE (default %f)\n", params->rope_freq_scale);
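Note that the diff only touches train_print_usage, so the flag itself was already parsed before this commit; the change merely documents it in the --help output. Below is a minimal sketch of an invocation using the newly documented flag, assuming the finetune example binary from llama.cpp; the file names and the --train-data option are illustrative placeholders, not taken from this commit.

# Illustrative invocation; model/data/output names are placeholders and
# --train-data is assumed from the finetune example's other options.
# --n-gpu-layers 16 asks llama.cpp to offload 16 model layers to the GPU.
./finetune \
  --model-base open-llama-3b-v2-q8_0.gguf \
  --train-data shakespeare.txt \
  --lora-out lora-shakespeare.gguf \
  --n-gpu-layers 16

Offloading only takes effect in builds compiled with GPU support; in a CPU-only build the setting has no effect.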
Author: Clark Saben