	examples/finetune -opt SGD (stochastic gradient descent) memory opt
add unit-tested GGML_OPT_OPTIMIZER_SGD to ggml - avoids allocating
the AdamW m and v moment tensors.
support finetune.cpp arg -opt SGD (or sgd); default remains adamw as before.
llama 3.2-1b-F32 result: observed 11 GB GPU RAM (41 sec/epoch)
when using SGD instead of 19 GB (55 sec/epoch) when using adamw.
(wikipedia 100 lines finetune)
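(rough sanity check, assuming full-parameter finetuning of ~1.2B F32
weights: the AdamW m and v moments add about 2 * 4 bytes * 1.2e9 ≈ 9-10 GB
of optimizer state, roughly in line with the ~8 GB difference observed above.)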
(
with the same GPU memory, adamw can only fit a 512 batch/context before
OOM, reaching:
train: [███████▉] data=0000140/0000140 loss=0.02575±0.00099 acc=99.52±0.03% t=00:00:47 ETA=00:00:00
val:   [███████▉] data=0000008/0000008 loss=4.76565±0.28810 acc=41.46±0.77% t=00:00:00 ETA=00:00:00
SGD comes out ahead: although it converges more slowly, it allows up to a
1728 batch/context before OOM (note especially the better validation perf):
train: [███████▉] data=0000039/0000039 loss=0.00371±0.00010 acc=99.96±0.01% t=00:00:41 ETA=00:00:00
val:   [███████▉] data=0000003/0000003 loss=5.11406±0.76034 acc=48.01±0.69% t=00:00:01 ETA=00:00:00
)
note: when finetuning long enough (or with a high enough -lr),
validation accuracy *eventually* drops ('catastrophic forgetting').
the -lr-half (half-life) option is useful for SGD to avoid oscillation or
very slow underdamped learning (it makes setting -lr more forgiving).
the terminal -lr is for now set via -lr-halvings, i.e. if you want at most
1/8 the initial -lr you set -lr-halvings 3.
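for reference, the schedule implemented in lr_opt::get_lr (see the diff
below) is an exponential half-life decay; in rough ASCII form, with E the
number of epochs the decay is spread over:

    lr(epoch) = lr0 * 0.5^(epoch * n_half / E)   for epoch < E, then lr_min
    n_half    = log2(lr0 / lr_min)               (the -lr-halvings count)

e.g. -lr 1e-4 with -lr-halvings 3 ends up at 1.25e-5 by the final epoch.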
note: the objective loss may not be directly comparable between adamw and
sgd - check perplexity or accuracy, or consider relative improvements,
when judging convergence.
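(for reference, if the reported loss is mean cross-entropy in nats,
perplexity is just exp(loss), e.g. a val loss of 4.77 corresponds to
ppl ≈ exp(4.77) ≈ 117.)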
new finetune args: -wd 1e-9 to enable weight decay in sgd or adamw,
and -epochs N for the max number of epochs (default 2 as before).
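an illustrative invocation combining the new args (the binary name and the
model/data arguments here are placeholders, not taken from this commit -
check the finetune example's help output for the exact usage):

    ./llama-finetune -m llama-3.2-1b-f32.gguf -f wiki-100-lines.txt \
        -opt sgd -lr 1e-4 -lr-halvings 3 -wd 1e-9 -epochs 2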
caching (1 - wd*alpha) in the 'adamw' opt struct showed no noticeable perf
benefit, so it is disabled there (it is still done for the new SGD, though).
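for context, the decoupled weight-decay SGD step this caches the keep
factor for is, in rough form:

    theta = theta * (1 - alpha*wd) - alpha * grad

so (1 - alpha*wd) can be computed once per step rather than per parameter
(the exact update lives in ggml's SGD implementation).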
since optimizer memory is pre-allocated, the ggml_opt_get_optimizer_params
callback could probably switch between SGD and AdamW from epoch to epoch,
but would need to use adamw for the first epoch, presumably so the m, v
tensors get allocated up front (unconfirmed - no cmdline arg to set such a
policy yet).
test-opt checks adamw as before and now also sgd (except for a few tests
disabled for sgd only; these probably just need their values logged and
alternate reference values added); the tolerance on the 'regression' test
is broader for sgd (so we don't need many more epochs).
			
			
@@ -41,6 +41,7 @@
 #endif
 #include <locale>
 #include <windows.h>
+#include <string.h>
 #include <fcntl.h>
 #include <io.h>
 #else
@@ -1564,3 +1565,53 @@ ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std
 
     return result;
 }
+
+ggml_opt_optimizer_params common_opt_lr_pars(void * userdata) {
+    ggml_opt_optimizer_params result = ggml_opt_get_default_optimizer_params(nullptr);
+    const lr_opt &            d      = *(lr_opt *) userdata;
+    result.adamw.alpha = result.sgd.alpha = d.get_lr(d.epoch);
+    result.sgd.wd = result.adamw.wd = d.wd;
+    return result;
+}
+
+static inline bool eq_case_insensitive(char const* a, char const* b) {
+    return !
+#if defined(_MSC_VER)
+        _stricmp
+#else
+        strcasecmp
+#endif
+        (a, b);
+}
+
+enum ggml_opt_optimizer_type common_opt_get_optimizer(const char * n) {
+    if (eq_case_insensitive("adamw", n)) {
+        return GGML_OPT_OPTIMIZER_TYPE_ADAMW;
+    } else if (eq_case_insensitive("sgd", n)) {
+        return GGML_OPT_OPTIMIZER_TYPE_SGD;
+    } else {
+        return GGML_OPT_OPTIMIZER_TYPE_COUNT;
+    }
+}
+
+static float const k_log_2 = std::log(2.f);
+
+void lr_opt::init() {
+    if (lr_min > 0 && lr_min < lr0) {
+        float nhalf = std::log(lr0 / lr_min) / k_log_2;
+        float e     = epochs;
+        if (min_epochs > 0 && min_epochs < e)
+            e = min_epochs;
+        else
+            min_epochs = e;
+        scale_epoch = nhalf / e;
+    }
+}
+
+float lr_opt::get_lr(float epoch) const {
+    float r = lr_min <= 0 ? lr0 :
+        epoch >= min_epochs ? lr_min :
+        lr0 * std::pow(.5, epoch * scale_epoch);
+    LOG_INF("epoch %.2g lr=%.2g\n", epoch, r);
+    return r;
+}
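a minimal sketch of how the new common_opt_get_optimizer helper is meant to
be consumed when parsing -opt (illustrative only; the variable name
optimizer_name and the error handling are placeholders, not code from this
commit):

    const char * optimizer_name = "sgd";   // placeholder; would come from the -opt arg
    enum ggml_opt_optimizer_type opt = common_opt_get_optimizer(optimizer_name);
    if (opt == GGML_OPT_OPTIMIZER_TYPE_COUNT) {
        // COUNT is returned for anything other than "adamw"/"sgd" (case-insensitive)
        fprintf(stderr, "unknown optimizer '%s', expected adamw or sgd\n", optimizer_name);
    }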