metal : fix comments [no ci]
@@ -3,7 +3,7 @@
 // kernel parameters for mat-vec threadgroups
 //
-// N_R0: number of src0 rows to process per threadgroup
+// N_R0: number of src0 rows to process per simdgroup
 // N_SG: number of simdgroups per threadgroup
 //
 // TODO: for optimal performance, become function of the device and work size

@@ -2883,7 +2883,7 @@ static void ggml_metal_encode_node(
 id<MTLComputePipelineState> pipeline = nil;

 int nsg = 0; // number of simdgroups
-int nr0 = 0; // number of src0 rows per threadgroup
+int nr0 = 0; // number of src0 rows per simdgroup
 int nr1 = 1; // number of src1 rows per threadgroup

 size_t smem = 0; // shared memory
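
The distinction this commit fixes matters for the dispatch arithmetic: N_R0 counts src0 rows per simdgroup, not per threadgroup, so one threadgroup covers N_R0 * N_SG rows. A minimal sketch of that grid-size calculation, using hypothetical values for N_R0, N_SG, and the row count ne01 — this is an illustration of the comment semantics above, not ggml's actual dispatch code:

#include <stdio.h>

int main(void) {
    const int N_R0 = 4;    // hypothetical: src0 rows processed per simdgroup
    const int N_SG = 2;    // hypothetical: simdgroups per threadgroup
    const int ne01 = 4096; // hypothetical: total number of src0 rows

    // each threadgroup covers N_R0 rows per simdgroup times N_SG simdgroups
    const int rows_per_tg = N_R0 * N_SG;

    // ceiling division: enough threadgroups to cover all ne01 rows
    const int n_tg = (ne01 + rows_per_tg - 1) / rows_per_tg;

    printf("%d threadgroups x %d rows each cover %d rows\n",
           n_tg, rows_per_tg, ne01);
    return 0;
}

Under these assumptions, misreading N_R0 as rows per threadgroup would overcount the grid by a factor of N_SG, which is why the comment wording is worth fixing even though no code changes ([no ci]).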