	finetune: SGD optimizer, more CLI args (#13873)
* examples/finetune: -opt SGD (stochastic gradient descent), a lower-memory optimizer
Add unit-tested GGML_OPT_OPTIMIZER_SGD to ggml; unlike AdamW it avoids
allocating the m and v moment tensors.
Support the finetune.cpp arg -opt SGD (or sgd); the default remains adamw.
Llama 3.2-1B-F32 result: 11 GB of GPU RAM observed (41 sec/epoch) with SGD
versus 19 GB (55 sec/epoch) with AdamW, finetuning on 100 lines of Wikipedia
(see the back-of-the-envelope sketch after the numbers below).
(
With the same amount of GPU memory, AdamW can only fit 512 batch/context
before OOM, reaching:
train: [███████▉] data=0000140/0000140 loss=0.02575±0.00099 acc=99.52±0.03% t=00:00:47 ETA=00:00:00
val:   [███████▉] data=0000008/0000008 loss=4.76565±0.28810 acc=41.46±0.77% t=00:00:00 ETA=00:00:00
SGD comes out ahead here, though it converges more slowly, with a max of 1728
batch/context before OOM (note especially the better validation numbers):
train: [███████▉] data=0000039/0000039 loss=0.00371±0.00010 acc=99.96±0.01% t=00:00:41 ETA=00:00:00
val:   [███████▉] data=0000003/0000003 loss=5.11406±0.76034 acc=48.01±0.69% t=00:00:01 ETA=00:00:00
)
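For a rough sanity check of those numbers, a back-of-the-envelope sketch
(assumptions: ~1.24e9 parameters for Llama 3.2 1B, F32 optimizer state;
gradients, activations and the weights themselves are common to both runs
and ignored here):

#include <stdio.h>

int main(void) {
    const double n_params  = 1.24e9;               // Llama 3.2 1B, approximate parameter count
    const double adamw_opt = 2.0 * 4.0 * n_params; // m and v moment tensors, 4 bytes (F32) each
    printf("AdamW optimizer state: ~%.1f GB\n", adamw_opt / 1e9);   // ~9.9 GB
    printf("SGD   optimizer state:  0   GB (stateless update)\n");
    return 0;
}

This is in the same ballpark as the observed 19 GB vs 11 GB gap above.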
Note: when finetuning long enough (or with a large enough -lr), validation
accuracy *eventually* drops ('catastrophic forgetting').
The -lr-half (halflife) option is useful for SGD to avoid oscillation or
very slow underdamped learning (it makes the choice of -lr more forgiving).
The terminal -lr is currently set via -lr-halvings, i.e. if you want at most
1/8 of the initial -lr, set -lr-halvings 3.
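A sketch of the schedule this describes (the real finetune.cpp code may differ
in names and in whether it decays per epoch or per batch; lr_half corresponds
to -lr-half, max_halvings to -lr-halvings):

#include <math.h>

static float lr_at(float lr0, float epochs_elapsed, float lr_half, int max_halvings) {
    float halvings = epochs_elapsed / lr_half;   // halflives elapsed so far
    if (halvings > (float) max_halvings) {
        halvings = (float) max_halvings;         // terminal lr: lr0 * 2^-max_halvings
    }
    return lr0 * powf(0.5f, halvings);           // e.g. -lr-halvings 3 caps decay at lr0/8
}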
Note: the objective loss is not directly comparable between AdamW and SGD;
check perplexity or accuracy, or compare relative improvements, when judging
convergence.
New finetune args: -wd 1e-9 enables weight decay in SGD or AdamW, and
-epochs N caps the number of epochs (default 2, as before).
Caching (1 - wd*alpha) in the 'adamw' opt struct showed no noticeable perf
benefit and is disabled there (it is still done for the new SGD path).
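For clarity, the cached value is just the decoupled weight-decay "keep" factor
of the parameter update; a paraphrase (not the literal ggml kernel) of one SGD
step over n parameters x with gradients g:

#include <stdint.h>

static void sgd_step(float * x, const float * g, int64_t n, float alpha, float wd) {
    const float keep = 1.0f - alpha*wd;   // the value that can be cached once per step
    for (int64_t i = 0; i < n; ++i) {
        x[i] = keep*x[i] - alpha*g[i];    // decoupled weight decay + plain gradient step
    }
}

Caching keep only saves recomputing 1 - alpha*wd per element, which is
presumably why no perf benefit was measurable for AdamW.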
Since optimizer memory is pre-allocated, the ggml_opt_get_optimizer_params
callback could probably switch between SGD and AdamW from one epoch to the
next, but it would need to use AdamW for the first epoch (unconfirmed; there
is no cmdline arg to set such a policy yet).
test-opt checks AdamW as before and now also SGD (except for a few tests
disabled for SGD only; they probably just need their values logged and
alternate reference values added). The tolerance on the 'regression' test is
broader for SGD (so we don't need many more epochs).
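Example invocation (illustrative only, not part of the change; flag spellings
as described above, -m/-f are the usual common args, binary name and values
assumed):
llama-finetune -m model.gguf -f train.txt -opt sgd -lr 1e-4 -lr-half 1 -lr-halvings 3 -wd 1e-9 -epochs 2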
* Vulkan: Implement GGML_OP_OPT_STEP_SGD
* tests: Fix OPT_STEP_SGD test-backend-ops
* SGD op params store the weight decay itself, not 1 - alpha*wd
* minor + cosmetic changes
* fix vulkan sgd
* try CI fix
---------
Co-authored-by: 0cc4m <picard12@live.de>
Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
			
			
@@ -1012,11 +1012,12 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
     "CROSS_ENTROPY_LOSS",
     "CROSS_ENTROPY_LOSS_BACK",
     "OPT_STEP_ADAMW",
+    "OPT_STEP_SGD",
 
     "GLU",
 };
 
-static_assert(GGML_OP_COUNT == 87, "GGML_OP_COUNT != 87");
+static_assert(GGML_OP_COUNT == 88, "GGML_OP_COUNT != 88");
 
 static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "none",
@@ -1113,15 +1114,15 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "cross_entropy_loss(x,y)",
     "cross_entropy_loss_back(x,y)",
     "adamw(x)",
+    "sgd(x)",
 
     "glu(x)",
 };
 
-static_assert(GGML_OP_COUNT == 87, "GGML_OP_COUNT != 87");
+static_assert(GGML_OP_COUNT == 88, "GGML_OP_COUNT != 88");
 
 static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
 
 
 static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
     "ABS",
     "SGN",
@@ -5606,6 +5607,28 @@ struct ggml_tensor * ggml_opt_step_adamw(
     return result;
 }
 
+// opt_step_sgd
+
+struct ggml_tensor * ggml_opt_step_sgd(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a,
+        struct ggml_tensor  * grad,
+        struct ggml_tensor  * params) {
+    GGML_ASSERT(a->flags & GGML_TENSOR_FLAG_PARAM);
+    GGML_ASSERT(ggml_are_same_shape(a, grad));
+    GGML_ASSERT(params->type == GGML_TYPE_F32);
+    GGML_ASSERT(ggml_nelements(params) == 2);
+
+    struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+
+    result->op     = GGML_OP_OPT_STEP_SGD;
+    result->src[0] = a;
+    result->src[1] = grad;
+    result->src[2] = params;
+
+    return result;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 struct ggml_hash_set ggml_hash_set_new(size_t size) {
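The hunk above only builds the graph node; the per-backend kernels (CPU, CUDA,
Vulkan) perform the actual update. As a hedged sketch of how a caller might
wire the op up, assuming the 2-element params tensor holds {learning rate,
weight decay} in that order (helper name, ordering, and setup are illustrative,
not taken from the diff):

#include "ggml.h"

// Assumes a CPU ggml_context created with no_alloc = false, so params->data is writable.
static struct ggml_tensor * build_sgd_step(struct ggml_context * ctx,
                                           struct ggml_tensor  * w,     // weight; must carry GGML_TENSOR_FLAG_PARAM
                                           struct ggml_tensor  * grad,  // gradient, same shape as w
                                           float alpha, float wd) {
    // Exactly 2 F32 values, as asserted by ggml_opt_step_sgd; per the commit notes,
    // the op stores the raw weight decay wd, not the precomputed 1 - alpha*wd.
    struct ggml_tensor * params = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 2);
    ((float *) params->data)[0] = alpha; // assumed order: learning rate first
    ((float *) params->data)[1] = wd;    // then weight decay
    return ggml_opt_step_sgd(ctx, w, grad, params);
}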