	metal : pad n_ctx by 32 (#6177)
* metal : require ne00 >= 128 for mat-mat kernels

ggml-ci

* llama : pad n_ctx by 32

ggml-ci
@@ -13044,6 +13044,9 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_freq_base   = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
     cparams.rope_freq_scale  = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
 
+    // this is necessary due to kv_self.n being padded later during inference
+    cparams.n_ctx = GGML_PAD(cparams.n_ctx, 32);
+
     // with causal attention, the batch size is limited by the context size
     cparams.n_batch          = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
     cparams.n_ubatch         = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
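For reference, GGML_PAD rounds its argument up to the next multiple of a power-of-two alignment, so any user-requested n_ctx is bumped to a multiple of 32 before the KV cache is sized. A minimal standalone sketch of this behavior, assuming the usual bit-twiddling definition of the macro from ggml.h:

#include <stdio.h>

// Round x up to the next multiple of n (n must be a power of two);
// consistent with how GGML_PAD is used in llama_new_context_with_model.
#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))

int main(void) {
    printf("%d\n", GGML_PAD(2000, 32)); // 2016 -- padded up to a multiple of 32
    printf("%d\n", GGML_PAD(2048, 32)); // 2048 -- already aligned, unchanged
    return 0;
}

Padding here ensures cparams.n_ctx is never smaller than the padded kv_self.n computed later during inference, per the comment in the diff above.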
Georgi Gerganov