Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-02 09:12:03 +00:00).
Commit: llama : use std::abs instead of abs (#16853)
This commit is contained in:
@@ -2035,7 +2035,7 @@ int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buck
     if (bidirectional) {
         relative_bucket += (relative_position > 0) * n_buckets;
-        relative_position = abs(relative_position);
+        relative_position = std::abs(relative_position);
     } else {
         relative_position = -std::min<int32_t>(relative_position, 0);
     }
@@ -653,7 +653,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
                 gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64);
             } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
                 // Setting type to UINT32. See https://github.com/ggml-org/llama.cpp/pull/14182 for context
-                gguf_set_val_u32(ctx_out.get(), o.key, (uint32_t)abs(o.val_i64));
+                gguf_set_val_u32(ctx_out.get(), o.key, (uint32_t)std::abs(o.val_i64));
             } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
                 gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool);
             } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
Reference in New Issue
Block a user