	metal : fix indent (ggml/0)

ggml-metal.m · 28 changed lines (14 additions, 14 deletions)

@@ -1195,24 +1195,24 @@ static enum ggml_status ggml_metal_graph_compute(
                         [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                     } break;
                 case GGML_OP_CLAMP:
-                {
-                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline;
+                    {
+                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline;
 
-                    float min;
-                    float max;
-                    memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float));
-                    memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float));
+                        float min;
+                        float max;
+                        memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float));
+                        memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float));
 
-                    [encoder setComputePipelineState:pipeline];
-                    [encoder setBuffer:id_src0   offset:offs_src0 atIndex:0];
-                    [encoder setBuffer:id_dst    offset:offs_dst  atIndex:1];
-                    [encoder setBytes:&min length:sizeof(min) atIndex:2];
-                    [encoder setBytes:&max length:sizeof(max) atIndex:3];
+                        [encoder setComputePipelineState:pipeline];
+                        [encoder setBuffer:id_src0   offset:offs_src0 atIndex:0];
+                        [encoder setBuffer:id_dst    offset:offs_dst  atIndex:1];
+                        [encoder setBytes:&min length:sizeof(min) atIndex:2];
+                        [encoder setBytes:&max length:sizeof(max) atIndex:3];
 
-                    const int64_t n = ggml_nelements(dst);
+                        const int64_t n = ggml_nelements(dst);
 
-                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
-                } break;
+                        [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                    } break;
                 case GGML_OP_UNARY:
                     switch (ggml_get_unary_op(gf->nodes[i])) {
                         // we are not taking into account the strides, so for now require contiguous tensors
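For context, the host code in this hunk binds the src0/dst buffers at indices 0 and 1, passes the clamp bounds read from dst->op_params as constants at indices 2 and 3 via setBytes, and dispatches one thread per destination element (n = ggml_nelements(dst)). A Metal Shading Language kernel matching that dispatch would look roughly like the sketch below; the kernel name and signature are illustrative assumptions, not the actual kernel from ggml-metal.metal.

#include <metal_stdlib>
using namespace metal;

// Illustrative sketch only: a per-element clamp kernel consistent with the
// host-side dispatch above (buffers at indices 0/1, min/max constants at
// indices 2/3, one thread per destination element).
kernel void kernel_clamp_sketch(
        device const float * src0 [[buffer(0)]],
        device       float * dst  [[buffer(1)]],
        constant     float & minv [[buffer(2)]],
        constant     float & maxv [[buffer(3)]],
        uint                 tpig [[thread_position_in_grid]]) {
    dst[tpig] = clamp(src0[tpig], minv, maxv);
}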
Georgi Gerganov