	ggml : upgrade init_tensor API to return a ggml_status (#11854)
* Upgrade init_tensor API to return a ggml_status

To prepare for an 'abort-free' ggml (ggml should not abort on OOM but return an OOM status), as agreed with Diego in the ggml repo, upgrade the init_tensor() and view_init() APIs to return a ggml_status.

* misc fixes

---------

Co-authored-by: slaren <slarengh@gmail.com>
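A minimal sketch of what a backend's init_tensor callback looks like under the new API (this is not code from the commit; the backend name and the extra struct are hypothetical, only the signature and the ggml_status values follow ggml):

#include "ggml.h"
#include "ggml-backend-impl.h"
#include <stdlib.h>

// Hypothetical per-tensor bookkeeping allocated lazily by the backend.
struct my_backend_tensor_extra {
    void * device_ptr;
};

static enum ggml_status my_backend_buffer_init_tensor(
        ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    struct my_backend_tensor_extra * extra = calloc(1, sizeof(*extra));
    if (extra == NULL) {
        // Before this change an allocation failure here would typically abort;
        // with the new return type the OOM is surfaced to the caller instead.
        return GGML_STATUS_ALLOC_FAILED;
    }
    tensor->extra = extra;
    GGML_UNUSED(buffer);
    return GGML_STATUS_SUCCESS;
}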
This commit is contained in:
William Tambellini (committed by GitHub)
parent c43a3e7996
commit 70680c48e5
@@ -1211,7 +1211,7 @@ static void * ggml_backend_opencl_buffer_get_base(ggml_backend_buffer_t buffer)
     GGML_UNUSED(buffer);
 }
 
-static void ggml_backend_opencl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+static enum ggml_status ggml_backend_opencl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
     ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
 
     ggml_cl2_init(buffer->buft->device);
@@ -1251,6 +1251,7 @@ static void ggml_backend_opencl_buffer_init_tensor(ggml_backend_buffer_t buffer,
             tensor->extra = extra;
         }
     }
+    return GGML_STATUS_SUCCESS;
 }
 
 // The optimized gemm and gemv kernels are used for large matrices without batch.