Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-13 10:57:15 +00:00)
suggestions from coderabbit
@@ -2210,7 +2210,7 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev,
         case GGML_OP_COUNT_EQUAL:
             return true;
         case GGML_OP_SCALE:
-            float bias = ((const float *)(dst->op_params))[1];
+            float bias = ((const float *)(op->op_params))[1];
             return bias == 0.0f; // TODO: support bias != 0.0f
         case GGML_OP_SOFT_MAX:
             // TODO: support broadcast
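For context on why the check reads index [1]: GGML_OP_SCALE packs its float parameters into the tensor's op_params, and the fix reads them from op, the tensor actually passed to the supports-op hook, instead of a non-existent dst. Below is a minimal sketch of such a capability check, assuming that params[0] is the scale and params[1] is the bias as the diff implies; example_supports_scale is an illustrative name, not the CANN backend function.

// Hedged sketch, not the actual CANN backend code.
// Assumption (from the diff): GGML_OP_SCALE stores two floats in op_params,
// params[0] = scale, params[1] = bias.
#include <stdbool.h>
#include "ggml.h"

static bool example_supports_scale(const struct ggml_tensor * op) {
    const float * params = (const float *) op->op_params;
    const float bias = params[1];
    return bias == 0.0f; // only claim support when no bias is applied (per the TODO above)
}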
@@ -404,13 +404,13 @@ inline static void ggml_vec_mad1_f32(const int n, float * y, const float s, cons
 
         // leftovers
         for (int i = np; i < n; ++i) {
-            y[i] = y[i]*s + b;
+            y[i] = y[i]*s + b;
         }
     #endif
 #else
     // scalar
     for (int i = 0; i < n; ++i) {
-        y[i] *= y[i]*s + b;
+        y[i] = y[i]*s + b;
     }
 #endif
 }
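The second hunk fixes the scalar fallback of ggml_vec_mad1_f32, which updates each element in place as y[i] = y[i]*s + b; the removed line compounded the result with *=. A standalone reference version with a tiny check, using illustrative names rather than ggml's internal ones (the real function also has Accelerate and SIMD branches):

#include <stdio.h>

// Reference scalar version of the in-place multiply-add the diff fixes:
// y[i] <- y[i]*s + b for every element. Illustrative sketch, not ggml code.
static void vec_mad1_f32_ref(const int n, float * y, const float s, const float b) {
    for (int i = 0; i < n; ++i) {
        y[i] = y[i]*s + b;
    }
}

int main(void) {
    float y[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    vec_mad1_f32_ref(4, y, 2.0f, 0.5f); // expected output: 2.5 4.5 6.5 8.5
    for (int i = 0; i < 4; ++i) {
        printf("%g ", y[i]);
    }
    printf("\n");
    return 0;
}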