Merge branch 'master' into compilade/mamba2

Francis Couture-Harpin committed 2025-06-10 19:22:15 -04:00
581 changed files with 159374 additions and 37809 deletions


@@ -64,12 +64,17 @@
 // precomputed f32 table for f16 (256 KB) (ggml-impl.h)
 float ggml_table_f32_f16[1 << 16];

-#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
-    (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH))
+#if defined(__linux__) || \
+    defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
+    (defined(__APPLE__) && !TARGET_OS_TV && !TARGET_OS_WATCH)
 #include <unistd.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/wait.h>
+#if defined(__linux__)
+#include <sys/prctl.h>
+#endif
+
 #if defined(__ANDROID__)
 #include <unwind.h>
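
Note on the guard change: TargetConditionals.h defines TARGET_OS_TV and TARGET_OS_WATCH as 0/1 macros on every Apple platform, so the old !defined(...) test could never exclude tvOS/watchOS; the macro values have to be tested instead. A minimal sketch of the corrected pattern (standalone illustration, not code from this commit):

    #if defined(__APPLE__)
    #include <TargetConditionals.h> // always defines TARGET_OS_TV / TARGET_OS_WATCH (as 0 or 1)
    #if !TARGET_OS_TV && !TARGET_OS_WATCH
    // Apple targets where the fork()/exec()-based backtrace helper below may build
    #endif
    #endif
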
@@ -128,15 +133,46 @@ static void ggml_print_backtrace_symbols(void) {
 }
 #endif

-static void ggml_print_backtrace(void) {
+void ggml_print_backtrace(void) {
+    const char * GGML_NO_BACKTRACE = getenv("GGML_NO_BACKTRACE");
+    if (GGML_NO_BACKTRACE) {
+        return;
+    }
-    char attach[32];
-    snprintf(attach, sizeof(attach), "attach %d", getpid());
-    int pid = fork();
-    if (pid == 0) {
+#if defined(__linux__)
+    FILE * f = fopen("/proc/self/status", "r");
+    size_t size = 0;
+    char * line = NULL;
+    ssize_t length = 0;
+    while ((length = getline(&line, &size, f)) > 0) {
+        if (!strncmp(line, "TracerPid:", sizeof("TracerPid:") - 1) &&
+                (length != sizeof("TracerPid:\t0\n") - 1 || line[length - 2] != '0')) {
+            // Already being debugged, and the breakpoint is the later abort()
+            free(line);
+            fclose(f);
+            return;
+        }
+    }
+    free(line);
+    fclose(f);
+    int lock[2] = { -1, -1 };
+    (void) !pipe(lock); // Don't start gdb until after PR_SET_PTRACER
+#endif
+    const int parent_pid = getpid();
+    const int child_pid  = fork();
+    if (child_pid < 0) { // error
+#if defined(__linux__)
+        close(lock[1]);
+        close(lock[0]);
+#endif
+        return;
+    } else if (child_pid == 0) { // child
+        char attach[32];
+        snprintf(attach, sizeof(attach), "attach %d", parent_pid);
+#if defined(__linux__)
+        close(lock[1]);
+        (void) !read(lock[0], lock, 1);
+        close(lock[0]);
+#endif
         // try gdb
         execlp("gdb", "gdb", "--batch",
             "-ex", "set style enabled on",
@@ -149,22 +185,22 @@ static void ggml_print_backtrace(void) {
         execlp("lldb", "lldb", "--batch",
             "-o", "bt",
             "-o", "quit",
-            "-p", attach,
+            "-p", &attach[sizeof("attach ") - 1],
             (char *) NULL);
-        exit(EXIT_FAILURE);
-    } else {
-        int wstatus;
-        waitpid(pid, &wstatus, 0);
-        if (WIFEXITED(wstatus)) {
-            if (WEXITSTATUS(wstatus) == EXIT_FAILURE) {
-                // gdb failed, fallback to backtrace_symbols
-                ggml_print_backtrace_symbols();
-            }
-        }
+        // gdb failed, fallback to backtrace_symbols
+        ggml_print_backtrace_symbols();
+        _Exit(0);
+    } else { // parent
+#if defined(__linux__)
+        prctl(PR_SET_PTRACER, child_pid);
+        close(lock[1]);
+        close(lock[0]);
+#endif
+        waitpid(child_pid, NULL, 0);
     }
 }
 #else
-static void ggml_print_backtrace(void) {
+void ggml_print_backtrace(void) {
     // platform not supported
 }
 #endif
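
Reading of these two hunks: ggml_print_backtrace now forks a child that attaches gdb (or lldb) back to the parent. On Linux, the pipe pair `lock` delays the child's exec until the parent has granted ptrace rights via prctl(PR_SET_PTRACER, child_pid), and the TracerPid check in /proc/self/status skips everything when a debugger is already attached. A hedged usage sketch (the function is exported by this commit; the extern declaration below exists only for the sketch):

    #include <stdlib.h>

    extern void ggml_print_backtrace(void); // made non-static by this commit

    int main(void) {
        // setenv("GGML_NO_BACKTRACE", "1", 1); // uncommented, this turns the call into a no-op
        ggml_print_backtrace(); // forks gdb/lldb against the current process and prints a backtrace
        return 0;
    }
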
@@ -185,6 +221,8 @@ void ggml_abort(const char * file, int line, const char * fmt, ...) {
     abort();
 }

+// ggml_print_backtrace is registered with std::set_terminate by ggml.cpp
+
 //
 // logging
 //
@@ -1068,9 +1106,10 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
     "HARDSWISH",
     "HARDSIGMOID",
     "EXP",
+    "GELU_ERF",
 };

-static_assert(GGML_UNARY_OP_COUNT == 14, "GGML_UNARY_OP_COUNT != 14");
+static_assert(GGML_UNARY_OP_COUNT == 15, "GGML_UNARY_OP_COUNT != 15");

 static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
@@ -1299,6 +1338,10 @@ bool ggml_is_contiguous_2(const struct ggml_tensor * tensor) {
     return ggml_is_contiguous_n(tensor, 2);
 }

+bool ggml_is_contiguously_allocated(const struct ggml_tensor * tensor) {
+    return ggml_nbytes(tensor) == ggml_nelements(tensor) * ggml_type_size(tensor->type)/ggml_blck_size(tensor->type);
+}
+
 bool ggml_is_permuted(const struct ggml_tensor * tensor) {
     static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
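
The new predicate is weaker than ggml_is_contiguous: it only asks whether the tensor's bytes form one dense block, regardless of stride order. An illustrative sketch (my example, not from the commit):

    // a transposed view keeps its data in one dense block, so:
    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 8);
    struct ggml_tensor * t = ggml_transpose(ctx, a);
    // ggml_is_contiguous(t)             -> false (strides are swapped)
    // ggml_is_contiguously_allocated(t) -> true  (nbytes == nelements * type size)
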
@@ -2276,6 +2319,26 @@ struct ggml_tensor * ggml_repeat(
     return result;
 }

+struct ggml_tensor * ggml_repeat_4d(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a,
+        int64_t ne0, int64_t ne1, int64_t ne2, int64_t ne3) {
+    const bool can_repeat = ggml_is_empty(a) || (
+        (ne0 % a->ne[0] == 0) &&
+        (ne1 % a->ne[1] == 0) &&
+        (ne2 % a->ne[2] == 0) &&
+        (ne3 % a->ne[3] == 0)
+    );
+    GGML_ASSERT(can_repeat);
+
+    struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
+
+    result->op     = GGML_OP_REPEAT;
+    result->src[0] = a;
+
+    return result;
+}
+
 // ggml_repeat_back

 struct ggml_tensor * ggml_repeat_back(
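
Unlike ggml_repeat, which takes a second tensor only to read its shape, ggml_repeat_4d takes the target shape directly. A usage sketch (shapes made up for illustration):

    // tile an 8-element row into 4 copies: [8,1,1,1] -> [8,4,1,1]
    struct ggml_tensor * row   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    struct ggml_tensor * tiled = ggml_repeat_4d(ctx, row, 8, 4, 1, 1);
    // equivalent to ggml_repeat(ctx, row, tmpl) with a [8,4,1,1] template tensor,
    // but without having to allocate the template
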
@@ -2466,6 +2529,20 @@ struct ggml_tensor * ggml_gelu_inplace(
     return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU);
 }

+// ggml_gelu_erf
+
+struct ggml_tensor * ggml_gelu_erf(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a) {
+    return ggml_unary(ctx, a, GGML_UNARY_OP_GELU_ERF);
+}
+
+struct ggml_tensor * ggml_gelu_erf_inplace(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a) {
+    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_GELU_ERF);
+}
+
 // ggml_gelu_quick

 struct ggml_tensor * ggml_gelu_quick(
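
GELU_ERF is the exact GELU, GELU(x) = x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2))), as opposed to the tanh approximation behind GGML_UNARY_OP_GELU. A scalar reference sketch (my paraphrase of the definition, not the kernel from this commit):

    #include <math.h>

    // exact GELU via erf; GGML_UNARY_OP_GELU uses the faster tanh approximation instead
    static float gelu_erf_ref(float x) {
        return 0.5f * x * (1.0f + erff(x / sqrtf(2.0f)));
    }
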
@@ -2728,11 +2805,11 @@ void ggml_mul_mat_set_prec(
     c = ggml_mul_mat_id(ctx, as, b, ids);

     as  -> [cols, rows, n_expert]
-    ids -> [n_experts_used, n_tokens] (i32)
     b   -> [cols, n_expert_used, n_tokens]
+    ids -> [n_expert_used, n_tokens] (i32)
     c   -> [rows, n_expert_used, n_tokens]

-    in b, n_experts_used can be broadcasted to match the n_expert_used of ids
+    in b, n_expert_used can be broadcasted to match the n_expert_used of ids

     c ~= as[:,:,i] @ b[:,i%r,t], i = ids[e,t] for all e,t in ids
 */
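
A worked shape example for the corrected comment, in mixture-of-experts terms (names and sizes are mine, for illustration only): take cols = n_embd and rows = n_ff.

    // as  : [n_embd, n_ff, n_expert]          -- one weight matrix per expert
    // ids : [n_expert_used, n_tokens] (i32)   -- experts picked per token, e.g. top-k router output
    // b   : [n_embd, n_expert_used, n_tokens] -- or [n_embd, 1, n_tokens], broadcast across the slots
    // c   : [n_ff, n_expert_used, n_tokens]
    struct ggml_tensor * c = ggml_mul_mat_id(ctx, as, b, ids);
    // slot e of token t multiplies expert ids[e,t]'s matrix with the matching slice of b
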
@@ -5508,7 +5585,7 @@ static void ggml_compute_backward(
             // tensor = src0 * 1 + src1 * 0
             if (src0_needs_grads) {
                 // dsrc0 = dtensor * 1
-                ggml_add_or_set(ctx, cgraph, isrc0, grad);
+                ggml_add_or_set(ctx, cgraph, isrc0, ggml_reshape(ctx, grad, src0));
             }
             if (src1_needs_grads) {
                 // dsrc1 = dtensor * 0 -> noop
@@ -5789,10 +5866,9 @@ void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor *
 }

 void ggml_build_backward_expand(
-        struct ggml_context *  ctx_static,
-        struct ggml_context *  ctx_compute,
-        struct ggml_cgraph  *  cgraph,
-        bool                   accumulate) {
+        struct ggml_context *  ctx,
+        struct ggml_cgraph  *  cgraph,
+        struct ggml_tensor  ** grad_accs) {
     GGML_ASSERT(cgraph->n_nodes > 0);
     GGML_ASSERT(cgraph->grads);
     GGML_ASSERT(cgraph->grad_accs);
@@ -5865,21 +5941,24 @@ void ggml_build_backward_expand(
         GGML_ASSERT(!node->view_src || node->op == GGML_OP_CPY || node->op == GGML_OP_VIEW ||
             node->op == GGML_OP_RESHAPE || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_TRANSPOSE);

-        const size_t igrad = ggml_hash_find(&cgraph->visited_hash_set, node);
-        GGML_ASSERT(igrad != GGML_HASHSET_FULL);
-        GGML_ASSERT(ggml_bitset_get(cgraph->visited_hash_set.used, igrad));
-        if ((accumulate && (node->flags & GGML_TENSOR_FLAG_PARAM)) || (node->flags & GGML_TENSOR_FLAG_LOSS)) {
-            cgraph->grad_accs[igrad] = ggml_dup_tensor(ctx_static, node);
-            cgraph->grads[igrad]     = cgraph->grad_accs[igrad];
-            ggml_format_name(cgraph->grad_accs[igrad], "grad acc for %s", node->name);
+        const size_t ihash = ggml_hash_find(&cgraph->visited_hash_set, node);
+        GGML_ASSERT(ihash != GGML_HASHSET_FULL);
+        GGML_ASSERT(ggml_bitset_get(cgraph->visited_hash_set.used, ihash));
+        if (grad_accs && grad_accs[i]) {
+            cgraph->grad_accs[ihash] = grad_accs[i];
+            cgraph->grads[ihash]     = cgraph->grad_accs[ihash];
+        } else if (node->flags & GGML_TENSOR_FLAG_LOSS) {
+            // loss tensors always need a gradient accumulator
+            cgraph->grad_accs[ihash] = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, node->ne);
+            cgraph->grads[ihash]     = cgraph->grad_accs[ihash];
         }
-        grads_needed[igrad] = true;
+        grads_needed[ihash] = true;
     }

     for (int i = n_nodes_f - 1; i >= 0; --i) {
         // inplace operations to add gradients are not created by ggml_compute_backward except for gradient accumulation
         // use allocator to automatically make inplace operations
-        ggml_compute_backward(ctx_compute, cgraph, i, grads_needed);
+        ggml_compute_backward(ctx, cgraph, i, grads_needed);
     }

     free(grads_needed);
@@ -6025,8 +6104,8 @@ void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) {
     }
 }

-struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
-    struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads != NULL);
+struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph, bool force_grads) {
+    struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads || force_grads);
     ggml_graph_cpy(cgraph, result);
     return result;
 }
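
Putting the backward-expand and graph-dup changes together, a hedged sketch of the new training-graph setup; the call sequence mirrors how an optimizer might drive it, but the exact call sites are my assumption, and build_loss is a stand-in for the model + loss construction:

    struct ggml_tensor * weights = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
    ggml_set_param(weights);                       // new one-argument form (changed further down)
    struct ggml_tensor * loss = build_loss(ctx, weights); // hypothetical helper

    struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ false);
    ggml_build_forward_expand(gf, loss);
    ggml_set_loss(loss);                           // loss tensors always get a gradient accumulator

    struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf, /*force_grads =*/ true);
    ggml_build_backward_expand(ctx, gb, /*grad_accs =*/ NULL); // NULL: accumulators only for loss tensors
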
@@ -6045,6 +6124,9 @@ struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor) {
 }

 void ggml_graph_reset(struct ggml_cgraph * cgraph) {
+    if (!cgraph) {
+        return;
+    }
     GGML_ASSERT(cgraph->grads != NULL);

     for (int i = 0; i < cgraph->n_nodes; i++) {
@@ -6354,8 +6436,8 @@ void ggml_set_output(struct ggml_tensor * tensor) {
     tensor->flags |= GGML_TENSOR_FLAG_OUTPUT;
 }

-void ggml_set_param(struct ggml_context * ctx, struct ggml_tensor * tensor) {
-    GGML_UNUSED(ctx); // TODO: remove this parameter
+void ggml_set_param(struct ggml_tensor * tensor) {
+    GGML_ASSERT(tensor->op == GGML_OP_NONE);
     tensor->flags |= GGML_TENSOR_FLAG_PARAM;
 }
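
Call sites simply lose the context argument, and the new assert restricts parameters to leaf tensors. Sketch:

    struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
    ggml_set_param(w);   // old form: ggml_set_param(ctx, w)
    // the new GGML_ASSERT(tensor->op == GGML_OP_NONE) rejects non-leaf tensors:
    // ggml_set_param(ggml_mul_mat(ctx, w, x)) would now abort
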