	llama.cpp : split llama_context_params into model and context params (#3301)
* llama.cpp : split llama_context_params into model and context params (ggml-ci)
* fix metal build
* fix freq_base/scale default to model value
* llama-bench : keep the same model between tests when possible
* move n_threads to llama_context_params, add n_threads_batch
* fix mpi build
* remove kv_size(), cuda scratch fixes
* remove low-vram option
* add n_threads_batch to system info, refactor to get_system_info()
* add documentation about --threads-batch to the READMEs
* llama-bench fix
* main : fix rope freq/scale warning
* llama.cpp : add llama_get_model; common : add llama_tokenize from model
* remove duplicated ctx/model functions (ggml-ci)
* cuda : print total VRAM used
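For context, here is a minimal sketch of how a caller sets up a model and a context after this split. It assumes the post-split API surface (llama_model_default_params, llama_context_default_params, llama_load_model_from_file, llama_new_context_with_model) and the n_threads / n_threads_batch fields named in the commit message; the model path, the field values, and the llama_backend_init signature are illustrative only.

#include "llama.h"
#include <cstdio>

int main() {
    llama_backend_init(false);  // NUMA flag; signature assumed for this era of the API

    // Model params: settings that only matter when loading the weights.
    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 32;  // example value

    llama_model * model = llama_load_model_from_file("model.gguf", mparams);  // placeholder path
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // Context params: per-session settings, including the thread counts that
    // previously had to be passed to llama_decode()/llama_beam_search() on every call.
    llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx           = 2048;
    cparams.n_threads       = 8;  // threads for single-token generation
    cparams.n_threads_batch = 8;  // threads for prompt/batch processing (added by this change)

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        fprintf(stderr, "failed to create context\n");
        llama_free_model(model);
        return 1;
    }

    // ... decode / sample ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}

Keeping the load-time settings in llama_model_params is also what lets llama-bench reuse the same loaded model across test configurations, as noted in the commit message.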
@@ -160,7 +160,7 @@ int main(int argc, char ** argv)
 
     int n_past = 0;
 
-    if (llama_decode(ctx, llama_batch_get_one(tokens_list.data(), tokens_list.size(), n_past, 0), params.n_threads))
+    if (llama_decode(ctx, llama_batch_get_one(tokens_list.data(), tokens_list.size(), n_past, 0)))
     {
         fprintf(stderr, "%s : failed to eval prompt.\n" , __func__ );
         return 1;
@@ -170,7 +170,7 @@ int main(int argc, char ** argv)
     beam_search_callback_data callback_data{ctx, {}};
     size_t const beam_width = static_cast<size_t>(params.n_beams);
     int const n_predict = 256;
-    llama_beam_search(ctx, beam_search_callback, &callback_data, beam_width, n_past, n_predict, params.n_threads);
+    llama_beam_search(ctx, beam_search_callback, &callback_data, beam_width, n_past, n_predict);
 
     std::cout << "\n\n";
     for (llama_token const token_id : callback_data.response) {
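The hunks above show the call-site effect of the split: llama_decode() and llama_beam_search() drop their trailing n_threads argument because the thread counts are now read from the context they were created with. Code that used to carry a separate model pointer alongside the context can instead use the llama_get_model() accessor added by this commit; the sketch below also assumes that the n_vocab query takes a model pointer after the "remove duplicated ctx/model functions" cleanup.

// Sketch: recover the model from an existing context (accessor added in this commit)
// and query a model-level property from it. llama_n_vocab(model) is assumed to be
// the consolidated form after the duplicated ctx/model accessors were removed.
const llama_model * mdl = llama_get_model(ctx);
const int n_vocab = llama_n_vocab(mdl);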