Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-31 08:51:55 +00:00
			
		
		
		
	llama : more consistent names of count variables (#5994)
* llama : more consistent names of count variables

  ggml-ci

* llama : n_parallel -> n_seq_max
* common : fix param name
* examples : fix param name
This commit is contained in:
@@ -106,7 +106,7 @@ int main(int argc, char ** argv) {
     ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
 
     // ensure enough sequences are available
-    ctx_params.n_parallel = *std::max_element(n_pl.begin(), n_pl.end());
+    ctx_params.n_seq_max = *std::max_element(n_pl.begin(), n_pl.end());
 
     llama_context * ctx = llama_new_context_with_model(model, ctx_params);
 
		Reference in New Issue
	
	Block a user
Georgi Gerganov