	finetune: SGD optimizer, more CLI args (#13873)
* examples/finetune -opt SGD (stochastic gradient descent) memory opt
Add unit-tested GGML_OPT_OPTIMIZER_SGD to ggml - avoids allocating the
AdamW m, v moment tensors.
Support the finetune.cpp arg -opt SGD (or sgd); the default remains adamw, as before.
Llama 3.2-1B-F32 result: observed 11 GB GPU RAM (41 sec/epoch) with SGD
instead of 19 GB (55 sec/epoch) with AdamW
(finetuning on 100 lines of Wikipedia).
(With the same GPU memory, AdamW can only fit a 512 batch/context before OOM,
reaching:
train: [███████▉] data=0000140/0000140 loss=0.02575±0.00099 acc=99.52±0.03% t=00:00:47 ETA=00:00:00
val:   [███████▉] data=0000008/0000008 loss=4.76565±0.28810 acc=41.46±0.77% t=00:00:00 ETA=00:00:00
SGD is superior here: although it converges more slowly, it fits up to a 1728
batch/context before OOM (note especially the better validation performance):
train: [███████▉] data=0000039/0000039 loss=0.00371±0.00010 acc=99.96±0.01% t=00:00:41 ETA=00:00:00
val:   [███████▉] data=0000003/0000003 loss=5.11406±0.76034 acc=48.01±0.69% t=00:00:01 ETA=00:00:00
)
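For intuition on where the memory difference comes from, here is a minimal
sketch of the two per-parameter updates (illustrative only, not ggml's actual
kernels): AdamW keeps first/second-moment tensors m and v alongside every
trained weight, while plain SGD with decoupled weight decay touches only the
weight and its gradient.

#include <cmath>
#include <cstddef>

// AdamW: needs persistent m[] and v[] buffers of the same size as the weights.
void adamw_step(float * w, const float * g, float * m, float * v, size_t n,
                float alpha, float beta1, float beta2, float eps, float wd, int t) {
    for (size_t i = 0; i < n; ++i) {
        m[i] = beta1 * m[i] + (1.0f - beta1) * g[i];
        v[i] = beta2 * v[i] + (1.0f - beta2) * g[i] * g[i];
        const float mh = m[i] / (1.0f - std::pow(beta1, (float) t));  // bias correction
        const float vh = v[i] / (1.0f - std::pow(beta2, (float) t));
        w[i] = w[i] * (1.0f - alpha * wd) - alpha * mh / (std::sqrt(vh) + eps);
    }
}

// SGD with decoupled weight decay: no per-parameter optimizer state at all.
void sgd_step(float * w, const float * g, size_t n, float alpha, float wd) {
    for (size_t i = 0; i < n; ++i) {
        w[i] = w[i] * (1.0f - alpha * wd) - alpha * g[i];
    }
}

With F32 moments that is roughly 8 extra bytes per trained parameter for
AdamW, roughly consistent with the ~19 GB vs ~11 GB observed above for a 1B
parameter model.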
Note: when finetuning long enough (or with a high enough -lr),
validation accuracy *eventually* drops ('catastrophic forgetting').
The -lr-half (half-life) option is useful for SGD to avoid oscillation or
very slow learning, and makes setting -lr more forgiving.
The terminal -lr is for now set via lr-halvings, i.e. if you want at most
1/8 of the initial -lr, set -lr-halvings 3.
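As a sketch of the intended schedule (hypothetical helper, not the exact code
in this change): the learning rate halves every half-life worth of epochs, and
-lr-halvings caps the total decay, so 3 halvings floor it at lr0/8.

#include <algorithm>
#include <cmath>

// Hypothetical half-life decay helper matching the description above.
float decayed_lr(float lr0, float epoch, float lr_half, int lr_halvings) {
    const float lr_min = lr0 * std::pow(0.5f, (float) lr_halvings);  // e.g. lr0/8 for 3 halvings
    const float lr     = lr0 * std::pow(0.5f, epoch / lr_half);      // halve every lr_half epochs
    return std::max(lr, lr_min);
}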
Note: the objective loss may not be directly comparable between AdamW and SGD;
check perplexity or accuracy, or compare relative improvements, to judge
convergence.
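(For reference, if the reported loss is the mean cross-entropy in nats,
perplexity = exp(loss); e.g. the SGD validation loss of 5.114 above would
correspond to a perplexity of roughly exp(5.114) ≈ 166.)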
New finetune args: -wd 1e-9 enables weight decay in SGD or AdamW, and
-epochs N sets the maximum number of epochs (default 2, as before).
Caching (1 - wd*alpha) in the 'adamw' opt struct gave no noticeable perf
benefit and is disabled (it is still done for the new SGD, though).
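The cached factor in question is the decoupled weight-decay multiplier;
caching it just hoists one multiply out of the element loop. A hedged sketch,
not the ggml code:

#include <cstddef>

// keep = (1 - alpha*wd), computed once per optimizer step and reused for
// every element instead of being recomputed per element.
void sgd_step_cached_wd(float * w, const float * g, size_t n, float alpha, float wd) {
    const float keep = 1.0f - alpha * wd;
    for (size_t i = 0; i < n; ++i) {
        w[i] = keep * w[i] - alpha * g[i];
    }
}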
Since optimizer memory is pre-allocated, the ggml_opt_get_optimizer_params
callback could probably switch between SGD and AdamW on each epoch, provided
AdamW is used for the first epoch (unconfirmed - there is no cmdline arg to
set such a policy yet).
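The callback in question has roughly this shape; a hedged sketch of varying
hyperparameters per epoch through user data (the per-epoch SGD/AdamW switch
itself is the unconfirmed part, and the struct/field names should be checked
against ggml-opt.h):

#include "ggml-opt.h"

struct epoch_state { int epoch; float lr0; float wd; };

// Optimizer params are recomputed via this callback before each optimization
// step; here a simple 1/(1+epoch) learning-rate decay is applied to whichever
// optimizer is active.
static ggml_opt_optimizer_params lr_by_epoch(void * userdata) {
    const epoch_state * st = (const epoch_state *) userdata;
    ggml_opt_optimizer_params p = ggml_opt_get_default_optimizer_params(nullptr);
    const float lr = st->lr0 / (1.0f + (float) st->epoch);
    p.adamw.alpha = lr;
    p.adamw.wd    = st->wd;
    p.sgd.alpha   = lr;
    p.sgd.wd      = st->wd;
    return p;
}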
test-opt checks AdamW as before and now also SGD (except for a few tests
disabled for SGD only; these probably just need their values logged and
alternate reference values added). The tolerance on the 'regression' test is
broader for SGD (so we don't need many more epochs).
* Vulkan: Implement GGML_OP_OPT_STEP_SGD
* tests: Fix OPT_STEP_SGD test-backend-ops
* SGD op param stores weight-decay and not 1-alpha*wd
* minor + cosmetic changes
* fix vulkan sgd
* try CI fix
---------
Co-authored-by: 0cc4m <picard12@live.de>
Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
			
			
@@ -1238,6 +1238,7 @@ bool common_params_parse(int argc, char ** argv, common_params & params, llama_e
             common_params_print_completion(ctx_arg);
             exit(0);
         }
+        params.lr.init();
     } catch (const std::invalid_argument & ex) {
         fprintf(stderr, "%s\n", ex.what());
         ctx_arg.params = params_org;
@@ -2688,7 +2689,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, const std::string & value) {
             params.out_file = value;
         }
-    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS}));
+    ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_CVECTOR_GENERATOR, LLAMA_EXAMPLE_EXPORT_LORA, LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_FINETUNE}));
     add_opt(common_arg(
         {"-ofreq", "--output-frequency"}, "N",
         string_format("output the imatrix every N iterations (default: %d)", params.n_out_freq),
@@ -3566,5 +3567,51 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({ LLAMA_EXAMPLE_DIFFUSION }));
 
+    add_opt(
+        common_arg({ "-lr", "--learning-rate" }, "ALPHA",
+                   string_format(
+                       "adamw or sgd optimizer alpha (default: %.2g); note: sgd alpha recommended ~10x (no momentum)",
+                       (double) params.lr.lr0),
+                   [](common_params & params, const std::string & value) { params.lr.lr0 = std::stof(value); })
+            .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(
+        common_arg({ "-lr-min", "--learning-rate-min" }, "ALPHA",
+                   string_format(
+                       "(if >0) final learning rate after decay (if -decay-epochs is set, default=%.2g)",
+                       (double) params.lr.lr_min),
+                   [](common_params & params, const std::string & value) { params.lr.lr_min = std::stof(value); })
+            .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(
+        common_arg({ "-decay-epochs", "--learning-rate-decay-epochs" }, "ALPHA",
+                   string_format(
+                       "(if >0) decay learning rate to -lr-min after this many epochs (exponential decay, default=%.2g)",
+                       (double) params.lr.decay_epochs),
+                   [](common_params & params, const std::string & value) { params.lr.decay_epochs = std::stof(value); })
+            .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(common_arg(
+                { "-wd", "--weight-decay" }, "WD",
+                string_format(
+                    "adamw or sgd optimizer weight decay (0 is off; recommend very small e.g. 1e-9) (default: %.2g).",
+                    (double) params.lr.wd),
+                [](common_params & params, const std::string & value) { params.lr.wd = std::stof(value); })
+                .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(common_arg({ "-val-split", "--val-split" }, "FRACTION",
+                       string_format("fraction of data to use as validation set for training (default: %.2g).",
+                                     (double) params.val_split),
+                       [](common_params & params, const std::string & value) { params.val_split = std::stof(value); })
+                .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(common_arg({ "-epochs", "--epochs" }, "N",
+                       string_format("optimizer max # of epochs (default: %d)", params.lr.epochs),
+                       [](common_params & params, int epochs) { params.lr.epochs = epochs; })
+                .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+    add_opt(common_arg({ "-opt", "--optimizer" }, "sgd|adamw", "adamw or sgd",
+                       [](common_params & params, const std::string & name) {
+                           params.optimizer = common_opt_get_optimizer(name.c_str());
+                           if (params.optimizer == GGML_OPT_OPTIMIZER_TYPE_COUNT) {
+                               throw std::invalid_argument("invalid --optimizer, valid options: adamw, sgd");
+                           }
+                       })
+                .set_examples({ LLAMA_EXAMPLE_FINETUNE }));
+
     return ctx_arg;
 }
 