mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	llama.cpp : split llama_context_params into model and context params (#3301)
* llama.cpp : split llama_context_params into model and context params

  ggml-ci

* fix metal build
* fix freq_base/scale default to model value
* llama-bench : keep the same model between tests when possible
* move n_threads to llama_context_params, add n_threads_batch
* fix mpi build
* remove kv_size(), cuda scratch fixes
* remove low-vram option
* add n_threads_batch to system info, refactor to get_system_info()
* add documentation about --threads-batch to the READMEs
* llama-bench fix
* main : fix rope freq/scale warning
* llama.cpp : add llama_get_model

  common : add llama_tokenize from model

* remove duplicated ctx/model functions

  ggml-ci

* cuda : print total VRAM used
This commit is contained in:
		| @@ -304,7 +304,7 @@ static void init_model(struct llama_model * input, struct my_llama_model * model | ||||
|  | ||||
|         gguf_free(mctx); | ||||
|     } | ||||
|     hparams.n_vocab = llama_model_n_vocab(input); | ||||
|     hparams.n_vocab = llama_n_vocab(input); | ||||
|     hparams.n_ctx = n_ctx; | ||||
|  | ||||
|     // get tensors from llama_model (possibly mmapped) | ||||
| @@ -1540,12 +1540,14 @@ int main(int argc, char ** argv) { | ||||
|     printf("%s: seed: %u\n", __func__, params.common.seed); | ||||
|     srand(params.common.seed); | ||||
|  | ||||
|     struct llama_context_params llama_params = llama_context_default_params(); | ||||
|     llama_params.vocab_only = false; | ||||
|     struct llama_model_params llama_mparams = llama_model_default_params(); | ||||
|     llama_mparams.vocab_only = false; | ||||
|  | ||||
|     printf("%s: model base = '%s'\n", __func__, params.fn_model_base); | ||||
|     struct llama_model * lmodel = llama_load_model_from_file(params.fn_model_base, llama_params); | ||||
|     struct llama_context * lctx = llama_new_context_with_model(lmodel, llama_params); | ||||
|     struct llama_model * lmodel = llama_load_model_from_file(params.fn_model_base, llama_mparams); | ||||
|  | ||||
|     struct llama_context_params llama_cparams = llama_context_default_params(); | ||||
|     struct llama_context * lctx = llama_new_context_with_model(lmodel, llama_cparams); | ||||
|  | ||||
|     struct my_llama_model model; | ||||
|     init_model(lmodel, &model, params.fn_model_base, params.common.n_ctx); | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 slaren
					slaren