Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-27 08:21:30 +00:00)
ggml : split graph allocations according to backend max buffer size (#15815)
* ggml : make gallocr respect the backend's max buffer size
* if the graph requires more memory than can fit into a single allocation, split it into multiple backend buffers
* vulkan: report the actual max allocation size in buffer type interface
* fix missing newline, apple-clang warning
* track size of individual chunks in ggml_dyn_tallocr and raise max chunks. revert to use suballocation_block_size as max chunk size for vulkan.
* track (chunk, offset) pairs instead of "global" offsets through gallocr.
* simpler, don't need loops to map between local/global offsets
* touches more code
* fix dyn_tallocr_max_size and initialization
* fix memory leak when buffers are reused due to same buffer type appearing multiple times
* make vbuffer allocation follow the same logic as backend_buffer did before
* continue to use leftover unallocated space of previous chunks after a new one has been created
* treat free blocks of each chunk as separate list
* they're still allocated together, but start/end of each chunk is tracked, and allocate/free iterate over sub-ranges
* exhaust freed blocks of all chunks before considering their last blocks with unallocated space
* start with 0 chunks/blocks and create chunks as needed
* allow the last chunk to grow beyond max size
* refactor: move adding new free block and new chunk into separate functions
* allocate chunks individually with a separate free-blocks list for each one
* needs a bit more memory/allocations/indirections, but code is simpler
* fix warnings (missing static) & debug checks
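For orientation before the diff: with this change a gallocr buffer is no longer backed by a single backend buffer but by up to GGML_VBUFFER_MAX_CHUNKS separately allocated "chunks", and every tensor is addressed by a (chunk, offset) pair instead of a single global offset. Below is a minimal, self-contained sketch of that addressing scheme; the names mirror the patch but the code is illustrative only, not the gallocr implementation that follows.

// Illustrative sketch of (chunk, offset) addressing -- not the patch's code.
#include <stddef.h>
#include <stdint.h>

#define SKETCH_MAX_CHUNKS 16  // mirrors GGML_VBUFFER_MAX_CHUNKS in the patch

// every tensor gets a chunk index plus a local offset inside that chunk
struct sketch_address {
    int    chunk;
    size_t offset;
};

// a "virtual buffer": one separately allocated backend buffer per chunk,
// each capped at the backend's reported max buffer size
struct sketch_vbuffer {
    void * chunk_base[SKETCH_MAX_CHUNKS];
};

// resolving an address is just indexing the chunk and adding the offset
static inline void * sketch_resolve(const struct sketch_vbuffer * buf, struct sketch_address addr) {
    return (uint8_t *) buf->chunk_base[addr.chunk] + addr.offset;
}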
@@ -23,7 +23,7 @@ static bool ggml_is_view(const struct ggml_tensor * t) {
}

// ops that return true for this function must not use restrict pointers for their backend implementations
static bool ggml_op_can_inplace(enum ggml_op op) {
bool ggml_op_can_inplace(enum ggml_op op) {
    switch (op) {
        case GGML_OP_SCALE:
        case GGML_OP_DIAG_MASK_ZERO:
@@ -95,39 +95,104 @@ enum ggml_status ggml_tallocr_alloc(struct ggml_tallocr * talloc, struct ggml_te

// dynamic tensor allocator

#define GGML_VBUFFER_MAX_CHUNKS 16

// relative memory address within an allocation that can be split into multiple buffers (chunks)
struct buffer_address {
    int chunk;     // index of a backend buffer
    size_t offset; // local memory offset within the buffer
};

static const struct buffer_address GGML_BUFFER_ADDRESS_INVALID = { -1, SIZE_MAX };

static bool ggml_buffer_address_less(struct buffer_address a, struct buffer_address b) {
    return a.chunk != b.chunk ? a.chunk < b.chunk : a.offset < b.offset;
}

struct free_block {
    size_t offset;
    size_t size;
};

struct tallocr_chunk {
    struct free_block free_blocks[MAX_FREE_BLOCKS];
    int n_free_blocks;
    size_t max_size;
};

struct ggml_dyn_tallocr {
    size_t alignment;
    int n_free_blocks;
    struct free_block free_blocks[MAX_FREE_BLOCKS];
    size_t max_size;
    size_t max_chunk_size;
    struct tallocr_chunk * chunks[GGML_VBUFFER_MAX_CHUNKS];
    int n_chunks;

#ifdef GGML_ALLOCATOR_DEBUG
    struct {
        const struct ggml_tensor * tensor;
        size_t offset;
        struct buffer_address addr;
    } allocated_tensors[1024];
#endif
};
static void ggml_dyn_tallocr_insert_block(struct tallocr_chunk * chunk, size_t offset, size_t size) {
    GGML_ASSERT(chunk->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
    // insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
    int insert_pos = 0;
    while (insert_pos < chunk->n_free_blocks && chunk->free_blocks[insert_pos].offset < offset) {
        insert_pos++;
    }
    // shift all blocks from insert_pos onward to make room for the new block
    for (int i = chunk->n_free_blocks; i > insert_pos; i--) {
        chunk->free_blocks[i] = chunk->free_blocks[i-1];
    }
    // insert the new block
    chunk->free_blocks[insert_pos].offset = offset;
    chunk->free_blocks[insert_pos].size = size;
    chunk->n_free_blocks++;
}

static void ggml_dyn_tallocr_remove_block(struct tallocr_chunk * chunk, int idx) {
    // shift all elements after idx by 1 to the left, overwriting the element at idx
    for (int i = idx; i < chunk->n_free_blocks; i++) {
        chunk->free_blocks[i] = chunk->free_blocks[i+1];
    }
    chunk->n_free_blocks--;
}

static int ggml_dyn_tallocr_new_chunk(struct ggml_dyn_tallocr * alloc, size_t min_size) {
    if (alloc->n_chunks >= GGML_VBUFFER_MAX_CHUNKS) {
        return -1;
    }
    struct tallocr_chunk * chunk = calloc(1, sizeof(struct tallocr_chunk));
    chunk->n_free_blocks = 1;
    chunk->free_blocks[0].offset = 0;
    // available space in a chunk is limited to max_chunk_size, but can be higher if:
    // 1. a single tensor exceeds the maximum, and cannot fit any other way
    // 2. we are running out of chunks
    // backends will either manage to allocate the larger size, or report an error.
    chunk->free_blocks[0].size = MAX(min_size, alloc->max_chunk_size);
    if (alloc->n_chunks == GGML_VBUFFER_MAX_CHUNKS - 1) {
        chunk->free_blocks[0].size = SIZE_MAX/2;
    }
    alloc->chunks[alloc->n_chunks] = chunk;
    alloc->n_chunks++;
    return alloc->n_chunks - 1;
}
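A small worked example of the sizing rule implemented by ggml_dyn_tallocr_new_chunk (the numbers are illustrative assumptions, not values from the patch): a fresh chunk offers at least max_chunk_size bytes, grows to hold a single oversized tensor, and the last permitted chunk is made effectively unbounded so that an allocation failure surfaces in the backend rather than in the graph allocator.

// Worked example of the chunk sizing policy above (illustrative values only).
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void) {
    const size_t max_chunk_size = 64;                          // backend's max buffer size
    assert(EXAMPLE_MAX((size_t) 24,  max_chunk_size) ==  64);  // small request: chunk still offers max_chunk_size
    assert(EXAMPLE_MAX((size_t) 100, max_chunk_size) == 100);  // oversized tensor: the chunk grows to fit it
    const size_t last_chunk_size = SIZE_MAX / 2;               // final chunk: effectively unlimited
    assert(last_chunk_size >= max_chunk_size);
    return 0;
}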
#ifdef GGML_ALLOCATOR_DEBUG
|
||||
static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, size_t offset, const struct ggml_tensor * tensor) {
|
||||
static void add_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
|
||||
for (int i = 0; i < 1024; i++) {
|
||||
if (alloc->allocated_tensors[i].tensor == NULL) {
|
||||
alloc->allocated_tensors[i].tensor = tensor;
|
||||
alloc->allocated_tensors[i].offset = offset;
|
||||
alloc->allocated_tensors[i].addr = addr;
|
||||
return;
|
||||
}
|
||||
}
|
||||
GGML_ABORT("out of allocated_tensors");
|
||||
}
|
||||
static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, size_t offset, const struct ggml_tensor * tensor) {
|
||||
static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, const struct ggml_tensor * tensor) {
|
||||
for (int i = 0; i < 1024; i++) {
|
||||
if (alloc->allocated_tensors[i].offset == offset) {
|
||||
if (alloc->allocated_tensors[i].addr.chunk == addr.chunk && alloc->allocated_tensors[i].addr.offset == addr.offset) {
|
||||
alloc->allocated_tensors[i].tensor = NULL;
|
||||
return;
|
||||
}
|
||||
@@ -136,76 +201,94 @@ static void remove_allocated_tensor(struct ggml_dyn_tallocr * alloc, size_t offs
|
||||
}
|
||||
#endif
|
||||
|
||||
static size_t ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) {
|
||||
static struct buffer_address ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t size, const struct ggml_tensor * tensor) {
|
||||
size = aligned_offset(NULL, size, alloc->alignment);
|
||||
|
||||
AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
|
||||
|
||||
int best_fit_chunk = -1;
|
||||
int best_fit_block = -1;
|
||||
size_t max_avail = 0;
|
||||
|
||||
// find the best fitting free block besides the last block
|
||||
int best_fit_block = -1;
|
||||
size_t best_fit_size = SIZE_MAX;
|
||||
for (int i = 0; i < alloc->n_free_blocks - 1; i++) {
|
||||
struct free_block * block = &alloc->free_blocks[i];
|
||||
max_avail = MAX(max_avail, block->size);
|
||||
if (block->size >= size && block->size <= best_fit_size) {
|
||||
best_fit_block = i;
|
||||
best_fit_size = block->size;
|
||||
// find the best fitting free block besides the last block, within any chunk
|
||||
for (int c = 0; c < alloc->n_chunks; ++c) {
|
||||
struct tallocr_chunk * chunk = alloc->chunks[c];
|
||||
size_t best_fit_size = SIZE_MAX;
|
||||
for (int i = 0; i < chunk->n_free_blocks - 1; i++) {
|
||||
struct free_block * block = &chunk->free_blocks[i];
|
||||
max_avail = MAX(max_avail, block->size);
|
||||
if (block->size >= size && block->size <= best_fit_size) {
|
||||
best_fit_chunk = c;
|
||||
best_fit_block = i;
|
||||
best_fit_size = block->size;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (best_fit_block == -1) {
|
||||
// the last block is our last resort
|
||||
struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
|
||||
max_avail = MAX(max_avail, block->size);
|
||||
if (block->size >= size) {
|
||||
best_fit_block = alloc->n_free_blocks - 1;
|
||||
} else {
|
||||
// this should never happen
|
||||
GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
|
||||
__func__, size, max_avail);
|
||||
GGML_ABORT("not enough space in the buffer");
|
||||
}
|
||||
}
|
||||
|
||||
struct free_block * block = &alloc->free_blocks[best_fit_block];
|
||||
size_t offset = block->offset;
|
||||
block->offset = offset + size;
|
||||
block->size -= size;
|
||||
if (block->size == 0) {
|
||||
// remove block if empty
|
||||
alloc->n_free_blocks--;
|
||||
for (int j = best_fit_block; j < alloc->n_free_blocks; j++) {
|
||||
alloc->free_blocks[j] = alloc->free_blocks[j+1];
|
||||
}
|
||||
}
|
||||
|
||||
AT_PRINTF("block %d, offset %zu\n", best_fit_block, offset);
|
||||
|
||||
#ifdef GGML_ALLOCATOR_DEBUG
|
||||
add_allocated_tensor(alloc, offset, tensor);
|
||||
size_t cur_max = offset + size;
|
||||
if (cur_max > alloc->max_size) {
|
||||
// sort allocated_tensors by offset
|
||||
for (int i = 0; i < 1024; i++) {
|
||||
for (int j = i + 1; j < 1024; j++) {
|
||||
if (alloc->allocated_tensors[i].offset > alloc->allocated_tensors[j].offset) {
|
||||
const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
|
||||
size_t tmp_offset = alloc->allocated_tensors[i].offset;
|
||||
alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
|
||||
alloc->allocated_tensors[i].offset = alloc->allocated_tensors[j].offset;
|
||||
alloc->allocated_tensors[j].tensor = tmp_tensor;
|
||||
alloc->allocated_tensors[j].offset = tmp_offset;
|
||||
// no suitable block found, try the last block (this will grow a chunk's size)
|
||||
for (int c = 0; c < alloc->n_chunks; ++c) {
|
||||
struct tallocr_chunk * chunk = alloc->chunks[c];
|
||||
if (chunk->n_free_blocks > 0) {
|
||||
struct free_block * block = &chunk->free_blocks[chunk->n_free_blocks - 1];
|
||||
max_avail = MAX(max_avail, block->size);
|
||||
if (block->size >= size) {
|
||||
best_fit_chunk = c;
|
||||
best_fit_block = chunk->n_free_blocks - 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
GGML_LOG_DEBUG("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
|
||||
}
|
||||
|
||||
if (best_fit_block == -1) {
|
||||
// none of the existing chunks have enough space left
|
||||
best_fit_chunk = ggml_dyn_tallocr_new_chunk(alloc, size);
|
||||
best_fit_block = 0;
|
||||
}
|
||||
if (best_fit_chunk == -1) {
|
||||
// since the last chunk always has virtually endless memory, this should never happen
|
||||
GGML_LOG_ERROR("%s: not enough space in the buffer to allocate %zu bytes, largest block available %zu bytes\n",
|
||||
__func__, size, max_avail);
|
||||
GGML_ABORT("graph allocation: failed to reserve memory");
|
||||
}
|
||||
|
||||
struct tallocr_chunk * chunk = alloc->chunks[best_fit_chunk];
|
||||
struct free_block * block = &chunk->free_blocks[best_fit_block];
|
||||
struct buffer_address addr = {.chunk = best_fit_chunk, .offset = block->offset };
|
||||
block->offset += size;
|
||||
block->size -= size;
|
||||
if (block->size == 0) {
|
||||
// remove block if empty
|
||||
ggml_dyn_tallocr_remove_block(chunk, best_fit_block);
|
||||
}
|
||||
|
||||
AT_PRINTF("block %d, offset %zu, chunk %d\n", best_fit_block, addr.offset, addr.chunk);
|
||||
|
||||
#ifdef GGML_ALLOCATOR_DEBUG
|
||||
add_allocated_tensor(alloc, addr, tensor);
|
||||
size_t cur_max = addr.offset + size;
|
||||
if (cur_max > alloc->max_size[addr.chunk]) {
|
||||
// sort allocated_tensors by chunk/offset
|
||||
for (int i = 0; i < 1024; i++) {
|
||||
for (int j = i + 1; j < 1024; j++) {
|
||||
if (ggml_buffer_address_less(alloc->allocated_tensors[j].addr, alloc->allocated_tensors[i].addr)) {
|
||||
const struct ggml_tensor * tmp_tensor = alloc->allocated_tensors[i].tensor;
|
||||
struct buffer_address tmp_addr = alloc->allocated_tensors[i].addr;
|
||||
alloc->allocated_tensors[i].tensor = alloc->allocated_tensors[j].tensor;
|
||||
alloc->allocated_tensors[i].addr = alloc->allocated_tensors[j].addr;
|
||||
alloc->allocated_tensors[j].tensor = tmp_tensor;
|
||||
alloc->allocated_tensors[j].addr = tmp_addr;
|
||||
}
|
||||
}
|
||||
}
|
||||
GGML_LOG_DEBUG("max_size[%d] = %.2f MB: tensors: ", addr.chunk, cur_max / 1024.0 / 1024.0);
|
||||
for (int i = 0; i < 1024; i++) {
|
||||
if (alloc->allocated_tensors[i].tensor) {
|
||||
GGML_LOG_DEBUG("%s [%zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
|
||||
alloc->allocated_tensors[i].offset,
|
||||
alloc->allocated_tensors[i].offset + ggml_nbytes(alloc->allocated_tensors[i].tensor),
|
||||
GGML_LOG_DEBUG("%s [%d: %zx-%zx] (%.2f MB) ", alloc->allocated_tensors[i].tensor->name,
|
||||
alloc->allocated_tensors[i].addr.chunk,
|
||||
alloc->allocated_tensors[i].addr.offset,
|
||||
alloc->allocated_tensors[i].addr.offset + ggml_nbytes(alloc->allocated_tensors[i].tensor),
|
||||
ggml_nbytes(alloc->allocated_tensors[i].tensor) / 1024.0 / 1024.0);
|
||||
}
|
||||
}
|
||||
@@ -213,78 +296,69 @@ static size_t ggml_dyn_tallocr_alloc(struct ggml_dyn_tallocr * alloc, size_t siz
|
||||
}
|
||||
#endif
|
||||
|
||||
alloc->max_size = MAX(alloc->max_size, offset + size);
|
||||
chunk->max_size = MAX(chunk->max_size, addr.offset + size);
|
||||
|
||||
return offset;
|
||||
return addr;
|
||||
|
||||
GGML_UNUSED(tensor);
|
||||
}
|
||||
|
||||
// this is a very naive implementation, but for our case the number of free blocks should be very small
|
||||
static void ggml_dyn_tallocr_free_tensor(struct ggml_dyn_tallocr * alloc, size_t offset, size_t size, const struct ggml_tensor * tensor) {
|
||||
static void ggml_dyn_tallocr_free_tensor(struct ggml_dyn_tallocr * alloc, struct buffer_address addr, size_t size, const struct ggml_tensor * tensor) {
|
||||
size = aligned_offset(NULL, size, alloc->alignment);
|
||||
|
||||
AT_PRINTF("%s: freeing %s at %zu (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, offset, size, alloc->n_free_blocks);
|
||||
AT_PRINTF("%s: freeing %s at {chunk=%d, offset=%zu} (%zu bytes) - n_free_blocks = %d\n",
|
||||
__func__, tensor->name, addr.chunk, addr.offset, size, alloc->chunks[addr.chunk]->n_free_blocks);
|
||||
|
||||
#ifdef GGML_ALLOCATOR_DEBUG
|
||||
remove_allocated_tensor(alloc, offset, tensor);
|
||||
remove_allocated_tensor(alloc, addr, tensor);
|
||||
#endif
|
||||
|
||||
struct tallocr_chunk * chunk = alloc->chunks[addr.chunk];
|
||||
|
||||
// see if we can merge with an existing block
|
||||
for (int i = 0; i < alloc->n_free_blocks; i++) {
|
||||
struct free_block * block = &alloc->free_blocks[i];
|
||||
for (int i = 0; i < chunk->n_free_blocks; i++) {
|
||||
struct free_block * block = &chunk->free_blocks[i];
|
||||
// check if ptr is at the end of the block
|
||||
if (block->offset + block->size == offset) {
|
||||
if (block->offset + block->size == addr.offset) {
|
||||
block->size += size;
|
||||
// check if we can merge with the next block
|
||||
if (i < alloc->n_free_blocks - 1 && block->offset + block->size == alloc->free_blocks[i+1].offset) {
|
||||
block->size += alloc->free_blocks[i+1].size;
|
||||
alloc->n_free_blocks--;
|
||||
for (int j = i+1; j < alloc->n_free_blocks; j++) {
|
||||
alloc->free_blocks[j] = alloc->free_blocks[j+1];
|
||||
if (i < chunk->n_free_blocks - 1) {
|
||||
struct free_block * next = &chunk->free_blocks[i+1];
|
||||
if (block->offset + block->size == next->offset) {
|
||||
block->size += next->size;
|
||||
ggml_dyn_tallocr_remove_block(chunk, i+1);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
// check if ptr is at the beginning of the block
|
||||
if (offset + size == block->offset) {
|
||||
block->offset = offset;
|
||||
if (addr.offset + size == block->offset) {
|
||||
block->offset = addr.offset;
|
||||
block->size += size;
|
||||
// check if we can merge with the previous block
|
||||
if (i > 0 && alloc->free_blocks[i-1].offset + alloc->free_blocks[i-1].size == block->offset) {
|
||||
alloc->free_blocks[i-1].size += block->size;
|
||||
alloc->n_free_blocks--;
|
||||
for (int j = i; j < alloc->n_free_blocks; j++) {
|
||||
alloc->free_blocks[j] = alloc->free_blocks[j+1];
|
||||
if (i > 0) {
|
||||
struct free_block * prev = &chunk->free_blocks[i-1];
|
||||
if (prev->offset + prev->size == block->offset) {
|
||||
prev->size += block->size;
|
||||
ggml_dyn_tallocr_remove_block(chunk, i);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
// otherwise, add a new block
|
||||
GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
|
||||
// insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
|
||||
int insert_pos = 0;
|
||||
while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].offset < offset) {
|
||||
insert_pos++;
|
||||
}
|
||||
// shift all blocks from insert_pos onward to make room for the new block
|
||||
for (int i = alloc->n_free_blocks; i > insert_pos; i--) {
|
||||
alloc->free_blocks[i] = alloc->free_blocks[i-1];
|
||||
}
|
||||
// insert the new block
|
||||
alloc->free_blocks[insert_pos].offset = offset;
|
||||
alloc->free_blocks[insert_pos].size = size;
|
||||
alloc->n_free_blocks++;
|
||||
ggml_dyn_tallocr_insert_block(chunk, addr.offset, size);
|
||||
|
||||
GGML_UNUSED(tensor);
|
||||
}
|
||||
|
||||
static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) {
|
||||
alloc->n_free_blocks = 1;
|
||||
alloc->free_blocks[0].offset = 0;
|
||||
alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
|
||||
alloc->max_size = 0;
|
||||
for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; i++) {
|
||||
free(alloc->chunks[i]);
|
||||
alloc->chunks[i] = NULL;
|
||||
}
|
||||
alloc->n_chunks = 0;
|
||||
|
||||
#ifdef GGML_ALLOCATOR_DEBUG
|
||||
for (int i = 0; i < 1024; i++) {
|
||||
@@ -293,14 +367,14 @@ static void ggml_dyn_tallocr_reset(struct ggml_dyn_tallocr * alloc) {
|
||||
#endif
|
||||
}
|
||||
|
||||
static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment) {
|
||||
static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment, size_t max_buffer_size) {
|
||||
struct ggml_dyn_tallocr * alloc = (struct ggml_dyn_tallocr *)malloc(sizeof(struct ggml_dyn_tallocr));
|
||||
|
||||
*alloc = (struct ggml_dyn_tallocr) {
|
||||
/*.alignment = */ alignment,
|
||||
/*.n_free_blocks = */ 0,
|
||||
/*.free_blocks = */ {{0}},
|
||||
/*.max_size = */ 0,
|
||||
/*.alignment = */ alignment,
|
||||
/*.max_chunk_size = */ MIN(max_buffer_size, SIZE_MAX/2), // clamp to avoid overflows
|
||||
/*.chunks = */ {NULL},
|
||||
/*.n_chunks = */ 0,
|
||||
#ifdef GGML_ALLOCATOR_DEBUG
|
||||
/*.allocated_tensors = */ {{0}},
|
||||
#endif
|
||||
@@ -312,11 +386,79 @@ static struct ggml_dyn_tallocr * ggml_dyn_tallocr_new(size_t alignment) {
|
||||
}
|
||||
|
||||
static void ggml_dyn_tallocr_free(struct ggml_dyn_tallocr * alloc) {
|
||||
for (int i = 0; i < alloc->n_chunks; ++i) {
|
||||
free(alloc->chunks[i]);
|
||||
}
|
||||
free(alloc);
|
||||
}
|
||||
|
||||
static size_t ggml_dyn_tallocr_max_size(struct ggml_dyn_tallocr * alloc) {
|
||||
return alloc->max_size;
|
||||
size_t max_size = 0;
|
||||
for (int i = 0; i < alloc->n_chunks; i++) {
|
||||
max_size += alloc->chunks[i]->max_size;
|
||||
}
|
||||
return max_size;
|
||||
}

// virtual buffer with contiguous memory range, split into multiple backend buffers (chunks)

struct vbuffer {
    ggml_backend_buffer_t chunks[GGML_VBUFFER_MAX_CHUNKS];
};

static void ggml_vbuffer_free(struct vbuffer * buf) {
    if (buf == NULL) {
        return;
    }
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS; ++i) {
        ggml_backend_buffer_free(buf->chunks[i]);
    }
    free(buf);
}

static int ggml_vbuffer_n_chunks(struct vbuffer * buf) {
    int n = 0;
    while (n < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[n]) n++;
    return n;
}

static size_t ggml_vbuffer_size(struct vbuffer * buf) {
    size_t size = 0;
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        size += ggml_backend_buffer_get_size(buf->chunks[i]);
    }
    return size;
}

static struct vbuffer * ggml_vbuffer_alloc(ggml_backend_buffer_type_t buft, const struct ggml_dyn_tallocr * talloc, enum ggml_backend_buffer_usage usage) {
    struct vbuffer * buf = (struct vbuffer *)calloc(1, sizeof(struct vbuffer));
    if (buf == NULL) {
        return NULL;
    }

    for (int n = 0; n < talloc->n_chunks; n++) {
        size_t chunk_size = talloc->chunks[n]->max_size;
        buf->chunks[n] = ggml_backend_buft_alloc_buffer(buft, chunk_size);
        if (buf->chunks[n] == NULL) {
            ggml_vbuffer_free(buf);
            return NULL;
        }
        ggml_backend_buffer_set_usage(buf->chunks[n], usage);
    }
    return buf;
}

static void ggml_vbuffer_tensor_alloc(struct vbuffer * buf, struct ggml_tensor * tensor, struct buffer_address buf_addr) {
    void * base = ggml_backend_buffer_get_base(buf->chunks[buf_addr.chunk]);
    void * addr = (char *)base + buf_addr.offset;
    ggml_backend_tensor_alloc(buf->chunks[buf_addr.chunk], tensor, addr);
}

static void ggml_vbuffer_reset(struct vbuffer * buf) {
    for (int i = 0; i < GGML_VBUFFER_MAX_CHUNKS && buf->chunks[i]; ++i) {
        ggml_backend_buffer_reset(buf->chunks[i]);
    }
}
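Taken together, a simplified outline of how gallocr uses these pieces when reserving memory for a graph (assumption: illustrative flow only; the real code also maintains per-tensor bookkeeping in the hash table and full error handling):

// Illustrative flow, not the actual gallocr code:
//   1. a measuring pass calls ggml_dyn_tallocr_alloc()/ggml_dyn_tallocr_free_tensor()
//      for every node, recording a (chunk, offset) address per tensor and the
//      high-water mark (max_size) of every chunk;
//   2. ggml_vbuffer_alloc() then requests one real backend buffer per chunk,
//      each sized to that chunk's max_size;
//   3. ggml_vbuffer_tensor_alloc() binds each tensor to chunk base + offset.
static struct vbuffer * sketch_bind_tensors(struct ggml_dyn_tallocr * talloc,
                                            ggml_backend_buffer_type_t buft,
                                            struct ggml_tensor ** tensors,
                                            struct buffer_address * addrs,
                                            int n_tensors) {
    struct vbuffer * buf = ggml_vbuffer_alloc(buft, talloc, GGML_BACKEND_BUFFER_USAGE_COMPUTE);
    GGML_ASSERT(buf != NULL);
    for (int i = 0; i < n_tensors; i++) {
        ggml_vbuffer_tensor_alloc(buf, tensors[i], addrs[i]);
    }
    return buf; // gallocr stores one vbuffer per buffer id and frees it on reallocation
}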
@@ -328,13 +470,13 @@ struct hash_node {
|
||||
int n_children;
|
||||
int n_views;
|
||||
int buffer_id;
|
||||
size_t offset; // offset within the buffer
|
||||
struct buffer_address addr;
|
||||
bool allocated;
|
||||
};
|
||||
|
||||
struct tensor_alloc {
|
||||
int buffer_id;
|
||||
size_t offset;
|
||||
struct buffer_address addr;
|
||||
size_t size_max; // 0 = pre-allocated, unused, or view
|
||||
};
|
||||
|
||||
@@ -349,7 +491,7 @@ struct node_alloc {
|
||||
|
||||
struct ggml_gallocr {
|
||||
ggml_backend_buffer_type_t * bufts; // [n_buffers]
|
||||
ggml_backend_buffer_t * buffers; // [n_buffers]
|
||||
struct vbuffer ** buffers; // [n_buffers]
|
||||
struct ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
|
||||
int n_buffers;
|
||||
|
||||
@@ -370,7 +512,7 @@ ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs
|
||||
galloc->bufts = calloc(n_bufs, sizeof(ggml_backend_buffer_type_t));
|
||||
GGML_ASSERT(galloc->bufts != NULL);
|
||||
|
||||
galloc->buffers = calloc(n_bufs, sizeof(ggml_backend_buffer_t));
|
||||
galloc->buffers = calloc(n_bufs, sizeof(struct vbuffer *));
|
||||
GGML_ASSERT(galloc->buffers != NULL);
|
||||
|
||||
galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *));
|
||||
@@ -390,7 +532,8 @@ ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs
|
||||
|
||||
if (galloc->buf_tallocs[i] == NULL) {
|
||||
size_t alignment = ggml_backend_buft_get_alignment(bufts[i]);
|
||||
galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment);
|
||||
size_t max_size = ggml_backend_buft_get_max_size(bufts[i]);
|
||||
galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment, max_size);
|
||||
}
|
||||
}
|
||||
galloc->n_buffers = n_bufs;
|
||||
@@ -418,7 +561,7 @@ void ggml_gallocr_free(ggml_gallocr_t galloc) {
|
||||
}
|
||||
}
|
||||
if (!freed) {
|
||||
ggml_backend_buffer_free(galloc->buffers[i]);
|
||||
ggml_vbuffer_free(galloc->buffers[i]);
|
||||
}
|
||||
}
|
||||
if (galloc->buf_tallocs != NULL) {
|
||||
@@ -467,7 +610,7 @@ static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor
|
||||
|
||||
if (!ggml_gallocr_is_allocated(galloc, node) && !ggml_is_view(node)) {
|
||||
hn->allocated = true;
|
||||
assert(hn->offset == 0);
|
||||
assert(hn->addr.offset == 0);
|
||||
|
||||
// try to reuse a parent's buffer (inplace)
|
||||
if (ggml_op_can_inplace(node->op)) {
|
||||
@@ -501,9 +644,9 @@ static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor
|
||||
struct hash_node * view_src_hn = ggml_gallocr_hash_get(galloc, view_src);
|
||||
if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
|
||||
AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
|
||||
assert(view_src_hn->offset == p_hn->offset);
|
||||
assert(view_src_hn->addr.chunk == p_hn->addr.chunk && view_src_hn->addr.offset == p_hn->addr.offset);
|
||||
hn->buffer_id = p_hn->buffer_id;
|
||||
hn->offset = p_hn->offset;
|
||||
hn->addr = p_hn->addr;
|
||||
p_hn->allocated = false; // avoid freeing the parent
|
||||
view_src_hn->allocated = false;
|
||||
return;
|
||||
@@ -511,7 +654,7 @@ static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor
|
||||
} else {
|
||||
AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
|
||||
hn->buffer_id = p_hn->buffer_id;
|
||||
hn->offset = p_hn->offset;
|
||||
hn->addr = p_hn->addr;
|
||||
p_hn->allocated = false; // avoid freeing the parent
|
||||
return;
|
||||
}
|
||||
@@ -522,9 +665,8 @@ static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor
|
||||
struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
|
||||
ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
|
||||
size_t size = ggml_backend_buft_get_alloc_size(buft, node);
|
||||
size_t offset = ggml_dyn_tallocr_alloc(alloc, size, node);
|
||||
hn->buffer_id = buffer_id;
|
||||
hn->offset = offset;
|
||||
hn->addr = ggml_dyn_tallocr_alloc(alloc, size, node);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -536,12 +678,11 @@ static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * n
|
||||
}
|
||||
|
||||
struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
|
||||
size_t offset = hn->offset;
|
||||
int buffer_id = hn->buffer_id;
|
||||
struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
|
||||
ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
|
||||
size_t size = ggml_backend_buft_get_alloc_size(buft, node);
|
||||
ggml_dyn_tallocr_free_tensor(alloc, offset, size, node);
|
||||
ggml_dyn_tallocr_free_tensor(alloc, hn->addr, size, node);
|
||||
hn->allocated = false;
|
||||
}
|
||||
|
||||
@@ -692,24 +833,24 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
|
||||
struct node_alloc * node_alloc = &galloc->node_allocs[i];
|
||||
if (node->view_src || node->data) {
|
||||
node_alloc->dst.buffer_id = -1;
|
||||
node_alloc->dst.offset = SIZE_MAX;
|
||||
node_alloc->dst.addr = GGML_BUFFER_ADDRESS_INVALID;
|
||||
node_alloc->dst.size_max = 0;
|
||||
} else {
|
||||
struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
|
||||
node_alloc->dst.buffer_id = hn->buffer_id;
|
||||
node_alloc->dst.offset = hn->offset;
|
||||
node_alloc->dst.addr = hn->addr;
|
||||
node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
|
||||
}
|
||||
for (int j = 0; j < GGML_MAX_SRC; j++) {
|
||||
struct ggml_tensor * src = node->src[j];
|
||||
if (!src || src->view_src || src->data) {
|
||||
node_alloc->src[j].buffer_id = -1;
|
||||
node_alloc->src[j].offset = SIZE_MAX;
|
||||
node_alloc->src[j].addr = GGML_BUFFER_ADDRESS_INVALID;
|
||||
node_alloc->src[j].size_max = 0;
|
||||
} else {
|
||||
struct hash_node * hn = ggml_gallocr_hash_get(galloc, src);
|
||||
node_alloc->src[j].buffer_id = hn->buffer_id;
|
||||
node_alloc->src[j].offset = hn->offset;
|
||||
node_alloc->src[j].addr = hn->addr;
|
||||
node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
|
||||
}
|
||||
}
|
||||
@@ -725,11 +866,11 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
|
||||
struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf);
|
||||
if (leaf->view_src || leaf->data) {
|
||||
galloc->leaf_allocs[i].leaf.buffer_id = -1;
|
||||
galloc->leaf_allocs[i].leaf.offset = SIZE_MAX;
|
||||
galloc->leaf_allocs[i].leaf.addr = GGML_BUFFER_ADDRESS_INVALID;
|
||||
galloc->leaf_allocs[i].leaf.size_max = 0;
|
||||
} else {
|
||||
galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
|
||||
galloc->leaf_allocs[i].leaf.offset = hn->offset;
|
||||
galloc->leaf_allocs[i].leaf.addr = hn->addr;
|
||||
galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
|
||||
}
|
||||
}
|
||||
@@ -744,7 +885,7 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
|
||||
}
|
||||
}
|
||||
|
||||
size_t cur_size = galloc->buffers[i] ? ggml_backend_buffer_get_size(galloc->buffers[i]) : 0;
|
||||
size_t cur_size = galloc->buffers[i] ? ggml_vbuffer_size(galloc->buffers[i]) : 0;
|
||||
size_t new_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);
|
||||
|
||||
// even if there are no tensors allocated in this buffer, we still need to allocate it to initialize views
|
||||
@@ -753,13 +894,12 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
|
||||
GGML_LOG_DEBUG("%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
|
||||
#endif
|
||||
|
||||
ggml_backend_buffer_free(galloc->buffers[i]);
|
||||
galloc->buffers[i] = ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
|
||||
ggml_vbuffer_free(galloc->buffers[i]);
|
||||
galloc->buffers[i] = ggml_vbuffer_alloc(galloc->bufts[i], galloc->buf_tallocs[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
|
||||
if (galloc->buffers[i] == NULL) {
|
||||
GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size);
|
||||
return false;
|
||||
}
|
||||
ggml_backend_buffer_set_usage(galloc->buffers[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -772,11 +912,11 @@ bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph *graph) {
|
||||
|
||||
static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
|
||||
int buffer_id = tensor_alloc->buffer_id;
|
||||
assert(tensor->data || tensor->view_src || ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
|
||||
assert(tensor->data || tensor->view_src || ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);
|
||||
|
||||
if (tensor->view_src != NULL) {
|
||||
if (tensor->buffer == NULL) {
|
||||
assert(tensor_alloc->offset == SIZE_MAX);
|
||||
assert(tensor_alloc->addr.offset == SIZE_MAX);
|
||||
if (tensor->view_src->buffer == NULL) {
|
||||
// this tensor was allocated without ggml-backend
|
||||
return;
|
||||
@@ -785,11 +925,9 @@ static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor *
|
||||
}
|
||||
} else {
|
||||
if (tensor->data == NULL) {
|
||||
assert(tensor_alloc->offset != SIZE_MAX);
|
||||
assert(ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
|
||||
void * base = ggml_backend_buffer_get_base(galloc->buffers[buffer_id]);
|
||||
void * addr = (char *)base + tensor_alloc->offset;
|
||||
ggml_backend_tensor_alloc(galloc->buffers[buffer_id], tensor, addr);
|
||||
assert(tensor_alloc->addr.offset != SIZE_MAX);
|
||||
assert(ggml_backend_buft_get_alloc_size(galloc->bufts[buffer_id], tensor) <= tensor_alloc->size_max);
|
||||
ggml_vbuffer_tensor_alloc(galloc->buffers[buffer_id], tensor, tensor_alloc->addr);
|
||||
} else {
|
||||
if (tensor->buffer == NULL) {
|
||||
// this tensor was allocated without ggml-backend
|
||||
@@ -874,7 +1012,7 @@ bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph)
|
||||
// reset buffers
|
||||
for (int i = 0; i < galloc->n_buffers; i++) {
|
||||
if (galloc->buffers[i] != NULL) {
|
||||
ggml_backend_buffer_reset(galloc->buffers[i]);
|
||||
ggml_vbuffer_reset(galloc->buffers[i]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -917,7 +1055,7 @@ size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) {
|
||||
}
|
||||
}
|
||||
|
||||
return ggml_backend_buffer_get_size(galloc->buffers[buffer_id]);
|
||||
return ggml_vbuffer_size(galloc->buffers[buffer_id]);
|
||||
}
|
||||
|
||||
// utils
|
||||
|
||||
@@ -342,6 +342,10 @@ struct ggml_cgraph {
// if you need the gradients, get them from the original graph
struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph, int i0, int i1);

// ggml-alloc.c: true if the operation can reuse memory from its sources
GGML_API bool ggml_op_can_inplace(enum ggml_op op);
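The declaration above exports a helper that was previously private to ggml-alloc.c; the new allocator test below relies on it. A hedged usage sketch (the wrapper function here is hypothetical, only ggml_op_can_inplace comes from the patch):

#include "ggml.h"
#include <stdbool.h>

// Hypothetical backend-side check: if gallocr may place a node's output on
// top of one of its inputs, the kernel must not assume non-aliasing
// (`restrict`) source/destination pointers for that op.
static bool kernel_may_alias_src_dst(const struct ggml_tensor * node) {
    return ggml_op_can_inplace(node->op);
}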

// Memory allocation

GGML_API void * ggml_aligned_malloc(size_t size);
@@ -219,3 +219,6 @@ target_link_libraries(${LLAMA_TEST_NAME} PRIVATE mtmd)
get_filename_component(TEST_TARGET test-c.c NAME_WE)
add_executable(${TEST_TARGET} test-c.c)
target_link_libraries(${TEST_TARGET} PRIVATE llama)

llama_build_and_test(test-alloc.cpp)
target_include_directories(test-alloc PRIVATE ${PROJECT_SOURCE_DIR}/ggml/src)
tests/test-alloc.cpp (new file, 572 lines)
@@ -0,0 +1,572 @@
#include <ggml-alloc.h>
|
||||
#include <ggml-backend-impl.h>
|
||||
#include <ggml-cpp.h>
|
||||
#include <ggml-impl.h>
|
||||
#include <ggml.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <exception>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
//
|
||||
// dummy backend with configurable max_buffer_size, tracks allocations
|
||||
|
||||
uint8_t * const alloc_base = (uint8_t *) 16;
|
||||
|
||||
struct dummy_backend_context {
|
||||
size_t max_buffer_size = 64;
|
||||
size_t alignment = 8;
|
||||
|
||||
ggml_backend_buffer_i buffer_interface;
|
||||
std::vector<ggml_backend_buffer_t> buffers;
|
||||
|
||||
size_t allocated_total() const {
|
||||
size_t n = 0;
|
||||
for (ggml_backend_buffer_t buf : buffers) {
|
||||
n += ggml_backend_buffer_get_size(buf);
|
||||
}
|
||||
return n;
|
||||
}
|
||||
};
|
||||
|
||||
// ggml_backend_buffer_type interface
|
||||
|
||||
static const char * dummy_backend_buffer_type_get_name(ggml_backend_buffer_type_t) {
|
||||
return "dummy_buffer_type";
|
||||
}
|
||||
|
||||
static ggml_backend_buffer_t dummy_backend_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
|
||||
dummy_backend_context * ctx = (dummy_backend_context *) buft->context;
|
||||
ggml_backend_buffer_t & buffer = ctx->buffers.emplace_back();
|
||||
buffer = ggml_backend_buffer_init(buft, ctx->buffer_interface, ctx, size);
|
||||
return buffer;
|
||||
}
|
||||
|
||||
static size_t dummy_backend_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
|
||||
dummy_backend_context * ctx = (dummy_backend_context *) buft->context;
|
||||
return ctx->alignment;
|
||||
}
|
||||
|
||||
static size_t dummy_backend_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
|
||||
dummy_backend_context * ctx = (dummy_backend_context *) buft->context;
|
||||
return ctx->max_buffer_size;
|
||||
}
|
||||
|
||||
static bool dummy_backend_buffer_type_is_host(ggml_backend_buffer_type_t) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// ggml_backend_buffer interface
|
||||
|
||||
static void dummy_backend_buffer_free_buffer(ggml_backend_buffer_t buffer) {
|
||||
dummy_backend_context * ctx = (dummy_backend_context *) buffer->context;
|
||||
|
||||
auto i = std::find(ctx->buffers.begin(), ctx->buffers.end(), buffer);
|
||||
GGML_ASSERT(i != ctx->buffers.end());
|
||||
ctx->buffers.erase(i);
|
||||
}
|
||||
|
||||
static void * dummy_backend_buffer_get_base(ggml_backend_buffer_t) {
|
||||
return alloc_base;
|
||||
}
|
||||
|
||||
static ggml_status dummy_backend_buffer_init_tensor(ggml_backend_buffer_t, ggml_tensor *) {
|
||||
return GGML_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
static void dummy_backend_buffer_memset_tensor(ggml_backend_buffer_t, ggml_tensor *, uint8_t, size_t, size_t) {}
|
||||
|
||||
static void dummy_backend_buffer_set_tensor(ggml_backend_buffer_t, ggml_tensor *, const void *, size_t, size_t) {}
|
||||
|
||||
static void dummy_backend_buffer_get_tensor(ggml_backend_buffer_t, const ggml_tensor *, void *, size_t, size_t) {}
|
||||
|
||||
static void dummy_backend_buffer_clear(ggml_backend_buffer_t, uint8_t) {}
|
||||
|
||||
// dummy_backend (not really a full backend, just provides what gallocr needs)
|
||||
|
||||
struct dummy_backend {
|
||||
std::unique_ptr<dummy_backend_context> context;
|
||||
ggml_backend_buffer_type buffer_type;
|
||||
};
|
||||
|
||||
static dummy_backend dummy_backend_init(size_t max_buffer_size, size_t alignment = 8) {
|
||||
dummy_backend b{};
|
||||
b.context = std::make_unique<dummy_backend_context>();
|
||||
b.context->alignment = alignment;
|
||||
b.context->max_buffer_size = max_buffer_size;
|
||||
|
||||
b.context->buffer_interface.free_buffer = dummy_backend_buffer_free_buffer;
|
||||
b.context->buffer_interface.get_base = dummy_backend_buffer_get_base;
|
||||
b.context->buffer_interface.init_tensor = dummy_backend_buffer_init_tensor;
|
||||
b.context->buffer_interface.memset_tensor = dummy_backend_buffer_memset_tensor;
|
||||
b.context->buffer_interface.set_tensor = dummy_backend_buffer_set_tensor;
|
||||
b.context->buffer_interface.get_tensor = dummy_backend_buffer_get_tensor;
|
||||
b.context->buffer_interface.clear = dummy_backend_buffer_clear;
|
||||
|
||||
b.buffer_type.context = b.context.get();
|
||||
b.buffer_type.iface.get_name = dummy_backend_buffer_type_get_name;
|
||||
b.buffer_type.iface.alloc_buffer = dummy_backend_buffer_type_alloc_buffer;
|
||||
b.buffer_type.iface.get_alignment = dummy_backend_buffer_type_get_alignment;
|
||||
b.buffer_type.iface.get_max_size = dummy_backend_buffer_type_get_max_size;
|
||||
b.buffer_type.iface.is_host = dummy_backend_buffer_type_is_host;
|
||||
return b;
|
||||
}
|
||||
|
||||
//
|
||||
// test utilities
|
||||
|
||||
struct test_context_with_graph {
|
||||
ggml_context * ctx;
|
||||
ggml_cgraph * graph;
|
||||
ggml_context_ptr ctx_ptr;
|
||||
};
|
||||
|
||||
static test_context_with_graph make_context() {
|
||||
ggml_init_params params{};
|
||||
params.mem_size = 48 * ggml_tensor_overhead() + ggml_graph_overhead();
|
||||
params.no_alloc = true;
|
||||
|
||||
ggml_context * ctx = ggml_init(params);
|
||||
ggml_context_ptr ctx_ptr = ggml_context_ptr(ctx);
|
||||
ggml_cgraph * graph = ggml_new_graph(ctx);
|
||||
return { ctx, graph, std::move(ctx_ptr) };
|
||||
}
|
||||
|
||||
static ggml_tensor * make_input_1d(ggml_context * ctx, int64_t n_elements) {
|
||||
ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_elements);
|
||||
ggml_set_input(t);
|
||||
return t;
|
||||
}
|
||||
|
||||
static ggml_tensor * make_input_with_size(ggml_context * ctx, size_t size_bytes) {
|
||||
GGML_ASSERT(size_bytes % 4 == 0);
|
||||
return make_input_1d(ctx, size_bytes / 4);
|
||||
}
|
||||
|
||||
static void assign_names(ggml_context * ctx, const char * prefix = "x") {
|
||||
int i = 0;
|
||||
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t; t = ggml_get_next_tensor(ctx, t)) {
|
||||
ggml_format_name(t, "%s%d", prefix, i++);
|
||||
}
|
||||
}
|
||||
|
||||
static int get_leaf_id(ggml_cgraph * graph, const char * tensor_name) {
|
||||
for (int i = 0; i < graph->n_leafs; ++i) {
|
||||
if (strncmp(graph->leafs[i]->name, tensor_name, GGML_MAX_NAME) == 0) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
fprintf(stderr, "leaf not found: %s\n", tensor_name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int get_node_id(ggml_cgraph * graph, const char * tensor_name) {
|
||||
for (int i = 0; i < graph->n_nodes; ++i) {
|
||||
if (strncmp(graph->nodes[i]->name, tensor_name, GGML_MAX_NAME) == 0) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
fprintf(stderr, "node not found: %s", tensor_name);
|
||||
return -1;
|
||||
}
|
||||
|
||||
static ggml_gallocr_ptr allocate_graph(ggml_cgraph * graph, ggml_tensor * out, ggml_backend_buffer_type_t buft) {
|
||||
ggml_set_output(out);
|
||||
ggml_build_forward_expand(graph, out);
|
||||
|
||||
ggml_gallocr_ptr galloc = ggml_gallocr_ptr(ggml_gallocr_new(buft));
|
||||
bool result = ggml_gallocr_alloc_graph(galloc.get(), graph);
|
||||
GGML_ASSERT(result);
|
||||
return galloc;
|
||||
}
|
||||
|
||||
//
|
||||
// correctness checks for result allocations
|
||||
|
||||
static void check_all_allocated(ggml_cgraph * graph) {
|
||||
for (int i = 0; i < ggml_graph_n_nodes(graph); ++i) {
|
||||
ggml_tensor * t = ggml_graph_node(graph, i);
|
||||
GGML_ASSERT(t->buffer != nullptr);
|
||||
GGML_ASSERT(t->data != nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
static void check_max_size(ggml_context * ctx) {
|
||||
for (ggml_tensor * t = ggml_get_first_tensor(ctx); t; t = ggml_get_next_tensor(ctx, t)) {
|
||||
auto buft = ggml_backend_buffer_get_type(t->buffer);
|
||||
size_t max_size = ggml_backend_buft_get_max_size(buft);
|
||||
size_t offset = (char *) t->data - (char *) ggml_backend_buffer_get_base(t->buffer);
|
||||
GGML_ASSERT(t->data >= ggml_backend_buffer_get_base(t->buffer));
|
||||
GGML_ASSERT((size_t) offset + ggml_nbytes(t) <= max_size);
|
||||
}
|
||||
}
|
||||
|
||||
static bool can_reuse_memory(ggml_cgraph * graph, int current_i, ggml_tensor * current, ggml_tensor * other) {
|
||||
if (other->flags & GGML_TENSOR_FLAG_OUTPUT) {
|
||||
return false;
|
||||
}
|
||||
// Check if `other` is still "alive", i.e. an input to any node after the `current` op
|
||||
for (int i = current_i; i < ggml_graph_n_nodes(graph); ++i) {
|
||||
ggml_tensor * t = ggml_graph_node(graph, i);
|
||||
for (int s = 0; s < GGML_MAX_SRC; s++) {
|
||||
if (t == current && ggml_op_can_inplace(t->op)) {
|
||||
continue;
|
||||
}
|
||||
if (t->src[s] == other) {
|
||||
return false;
|
||||
}
|
||||
if (t->src[s] && t->src[s]->view_src == other) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool memory_overlap(ggml_tensor * a, ggml_tensor * b) {
|
||||
if (a->buffer != b->buffer) {
|
||||
return false;
|
||||
}
|
||||
int64_t a0 = (int64_t) a->data;
|
||||
int64_t a1 = a0 + ggml_nbytes(a);
|
||||
int64_t b0 = (int64_t) b->data;
|
||||
int64_t b1 = b0 + ggml_nbytes(b);
|
||||
return a1 > b0 && b1 > a0;
|
||||
}
|
||||
|
||||
static ggml_tensor * get_view_source(ggml_tensor * t) {
|
||||
while (t->view_src) {
|
||||
t = t->view_src;
|
||||
}
|
||||
return t;
|
||||
}
|
||||
|
||||
static void check_no_overlap(ggml_cgraph * graph) {
|
||||
for (int i = 0; i < ggml_graph_n_nodes(graph); ++i) {
|
||||
for (int j = 0; j < i; ++j) {
|
||||
ggml_tensor * t = ggml_graph_node(graph, i);
|
||||
ggml_tensor * o = ggml_graph_node(graph, j);
|
||||
GGML_ASSERT(t != o);
|
||||
|
||||
if (get_view_source(t) == get_view_source(o)) {
|
||||
continue;
|
||||
}
|
||||
if (memory_overlap(t, o)) {
|
||||
GGML_ASSERT(can_reuse_memory(graph, i, t, o));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// test cases
|
||||
|
||||
// Scenario where the first backend buffer is completely exhausted and there are further
|
||||
// tensors which require a second buffer
|
||||
static void test_max_size_too_many_tensors() {
|
||||
dummy_backend backend = dummy_backend_init(16);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[7];
|
||||
x[0] = make_input_with_size(ctx, 8);
|
||||
x[1] = make_input_with_size(ctx, 8);
|
||||
x[2] = make_input_with_size(ctx, 8);
|
||||
x[3] = ggml_mul(ctx, x[0], x[1]);
|
||||
x[4] = ggml_add(ctx, x[1], x[2]);
|
||||
x[5] = ggml_add(ctx, x[3], x[0]);
|
||||
x[6] = ggml_add(ctx, x[4], x[5]);
|
||||
assign_names(ctx);
|
||||
|
||||
ggml_gallocr_ptr galloc = allocate_graph(graph, x[6], &backend.buffer_type);
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
check_max_size(ctx);
|
||||
GGML_ASSERT(backend.context->allocated_total() <= 16 + 16);
|
||||
}
|
||||
|
||||
// Scenario where there is some space left in the first buffer, but not enough to accommodate
|
||||
// a larger tensor, so a second buffer is required
|
||||
static void test_max_size_tensor_too_large() {
|
||||
dummy_backend backend = dummy_backend_init(32);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[3];
|
||||
x[0] = make_input_with_size(ctx, 16); // chunk 0, [0 , 16)
|
||||
x[1] = make_input_with_size(ctx, 8); // chunk 0, [16, 24)
|
||||
x[2] = ggml_concat(ctx, x[0], x[1], 0); // chunk 1, [0 , 24)
|
||||
assign_names(ctx);
|
||||
|
||||
ggml_gallocr_ptr galloc = allocate_graph(graph, x[2], &backend.buffer_type);
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
check_max_size(ctx);
|
||||
GGML_ASSERT(backend.context->allocated_total() <= 32 + 24);
|
||||
}
|
||||
|
||||
// Scenario where a single tensor exceeds the max buffer size - in this case the allocator
|
||||
// should try to create a bigger buffer anyway, and wait for the backend to throw an error.
|
||||
// Backends may report an artificially lower max size in some cases for compatibility reasons.
|
||||
static void test_tensor_larger_than_max_size() {
|
||||
dummy_backend backend = dummy_backend_init(16);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[2];
|
||||
x[0] = make_input_with_size(ctx, 24);
|
||||
x[1] = ggml_scale(ctx, x[0], 2.0f);
|
||||
assign_names(ctx);
|
||||
|
||||
ggml_gallocr_ptr galloc = allocate_graph(graph, x[1], &backend.buffer_type);
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
GGML_ASSERT(backend.context->allocated_total() == 24);
|
||||
}
|
||||
|
||||
// This test assumes a max of 16 buffer chunks, and tries to allocate tensors that would
|
||||
// require more. Expectation is that the last buffer should grow to fit everything,
|
||||
// leaving it to the backend to error out if it can't allocate that much.
|
||||
static void test_not_enough_chunks() {
|
||||
const int max_chunks = 16;
|
||||
const int max_size = 8;
|
||||
|
||||
dummy_backend backend = dummy_backend_init(max_size);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[max_chunks + 1];
|
||||
for (int i = 0; i < max_chunks + 1; ++i) {
|
||||
x[i] = make_input_with_size(ctx, max_size);
|
||||
}
|
||||
ggml_tensor * acc = x[0];
|
||||
for (int i = 0; i < max_chunks; ++i) {
|
||||
acc = ggml_add(ctx, acc, x[i + 1]);
|
||||
}
|
||||
assign_names(ctx);
|
||||
|
||||
ggml_gallocr_ptr galloc = allocate_graph(graph, acc, &backend.buffer_type);
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
GGML_ASSERT(backend.context->allocated_total() > max_chunks * max_size);
|
||||
}
|
||||
|
||||
// Fill up leftover unallocated space of a chunk after allocating a large tensor that
|
||||
// requires a new chunk.
|
||||
static void test_fill_leftover_space() {
|
||||
dummy_backend backend = dummy_backend_init(16);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[4];
|
||||
x[0] = make_input_with_size(ctx, 8);
|
||||
x[1] = ggml_pad(ctx, x[0], 2, 0, 0, 0);
|
||||
x[3] = ggml_mean(ctx, x[1]);
|
||||
assign_names(ctx);
|
||||
|
||||
ggml_gallocr_ptr galloc = allocate_graph(graph, x[3], &backend.buffer_type);
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
check_max_size(ctx);
|
||||
GGML_ASSERT(backend.context->allocated_total() <= 12 + 16);
|
||||
}
|
||||
|
||||
// Check that views don't require any extra memory
|
||||
static void test_view_inplace() {
|
||||
dummy_backend backend = dummy_backend_init(32);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[6];
|
||||
x[0] = make_input_1d(ctx, 4); // chunk 0, [0, 16)
|
||||
x[1] = ggml_reshape_2d(ctx, x[0], 2, 2); // view of x0
|
||||
x[2] = ggml_permute(ctx, x[1], 1, 0, 2, 3); // view of x0
|
||||
x[3] = ggml_view_1d(ctx, x[2], 2, 4); // view of x0
|
||||
x[4] = make_input_1d(ctx, 2); // chunk 0, [16, 24)
|
||||
x[5] = ggml_add(ctx, x[3], x[4]); // reuse (inplace add)
|
||||
assign_names(ctx);
|
||||
|
||||
ggml_gallocr_ptr galloc = allocate_graph(graph, x[5], &backend.buffer_type);
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
check_max_size(ctx);
|
||||
GGML_ASSERT(backend.context->allocated_total() <= 24);
|
||||
}
|
||||
|
||||
static void test_reuse_and_free() {
|
||||
dummy_backend backend = dummy_backend_init(40);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[9];
|
||||
x[0] = make_input_with_size(ctx, 24);
|
||||
x[1] = make_input_with_size(ctx, 8);
|
||||
x[2] = make_input_with_size(ctx, 8);
|
||||
x[3] = ggml_add(ctx, x[1], x[2]); // reuse, free x2
|
||||
x[4] = ggml_pad(ctx, x[0], 2, 0, 0, 0); // alloc new buffer, free x0
|
||||
x[5] = ggml_scale(ctx, x[4], 2.0f); // alloc from free block
|
||||
x[6] = ggml_add(ctx, x[4], x[5]); // reuse, free x5
|
||||
x[7] = ggml_view_1d(ctx, x[6], 2, 8); // view
|
||||
x[8] = ggml_add(ctx, x[3], x[7]); // reuse
|
||||
assign_names(ctx);
|
||||
|
||||
ggml_gallocr_ptr galloc = allocate_graph(graph, x[8], &backend.buffer_type);
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
check_max_size(ctx);
|
||||
GGML_ASSERT(backend.context->allocated_total() <= 40 + 32 + 32);
|
||||
}
|
||||
|
||||
static void test_merge_free_block(size_t max_buffer_size) {
|
||||
dummy_backend backend = dummy_backend_init(max_buffer_size);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[9];
|
||||
x[0] = make_input_with_size(ctx, 16);
|
||||
x[1] = make_input_with_size(ctx, 16);
|
||||
x[2] = make_input_with_size(ctx, 16);
|
||||
x[3] = ggml_mean(ctx, x[0]);
|
||||
x[4] = ggml_mean(ctx, x[1]);
|
||||
x[5] = ggml_pad(ctx, x[2], 2, 0, 0, 0);
|
||||
x[6] = ggml_add(ctx, x[3], x[4]);
|
||||
x[7] = ggml_pad(ctx, x[6], 5, 0, 0, 0);
|
||||
x[8] = ggml_add(ctx, x[5], x[7]);
|
||||
assign_names(ctx);
|
||||
|
||||
ggml_gallocr_ptr galloc = allocate_graph(graph, x[8], &backend.buffer_type);
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
check_max_size(ctx);
|
||||
GGML_ASSERT(backend.context->allocated_total() <= 32 + 32 + 24);
|
||||
}
|
||||
|
||||
// Check that previously allocated but freed memory is preferred over allocating
|
||||
// additional memory, even if the remaining space in a chunk would match tensor size better
|
||||
static void test_prefer_already_allocated_memory() {
|
||||
dummy_backend backend = dummy_backend_init(32, /*align*/ 4);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[3];
|
||||
x[0] = make_input_with_size(ctx, 24); // [24b][8b unused]
|
||||
x[1] = ggml_mean(ctx, x[0]); // [24b free][4b][4b unused]
|
||||
x[2] = ggml_mean(ctx, x[1]); // should be allocated in the 24b block
|
||||
assign_names(ctx);
|
||||
|
||||
ggml_gallocr_ptr galloc = allocate_graph(graph, x[2], &backend.buffer_type);
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
GGML_ASSERT(backend.context->allocated_total() <= 28);
|
||||
}
|
||||
|
||||
// test for allocating on multiple devices with some tensors in the graph
|
||||
// allocated externally (not by gallocr).
|
||||
static void test_multiple_buffer_types() {
|
||||
dummy_backend backend_a = dummy_backend_init(32);
|
||||
dummy_backend backend_b = dummy_backend_init(SIZE_MAX);
|
||||
|
||||
auto [ctx_a, _a, ctx_a_ptr] = make_context();
|
||||
auto [ctx_b, _b, ctx_b_ptr] = make_context();
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * a[2];
|
||||
a[0] = make_input_with_size(ctx_a, 16);
|
||||
a[1] = make_input_with_size(ctx_a, 16);
|
||||
assign_names(ctx_a, "a");
|
||||
|
||||
ggml_tensor * b[2];
|
||||
b[0] = make_input_with_size(ctx_b, 24);
|
||||
b[1] = make_input_with_size(ctx_b, 4);
|
||||
assign_names(ctx_b, "b");
|
||||
|
||||
ggml_tensor * x[9];
|
||||
x[0] = make_input_with_size(ctx, 16);
|
||||
x[1] = ggml_mul(ctx, x[0], a[0]);
|
||||
x[2] = ggml_pad(ctx, x[1], 2, 0, 0, 0);
|
||||
x[3] = ggml_mul(ctx, x[2], b[0]);
|
||||
x[4] = ggml_mean(ctx, x[3]);
|
||||
x[5] = ggml_add(ctx, x[4], b[1]);
|
||||
x[6] = ggml_pad(ctx, x[5], 3, 0, 0, 0);
|
||||
x[7] = ggml_add(ctx, x[6], a[1]);
|
||||
x[8] = ggml_scale(ctx, x[7], 2.0f);
|
||||
assign_names(ctx, "x");
|
||||
|
||||
ggml_backend_buffer_ptr buf_a(ggml_backend_alloc_ctx_tensors_from_buft(ctx_a, &backend_a.buffer_type));
|
||||
ggml_backend_buffer_ptr buf_b(ggml_backend_alloc_ctx_tensors_from_buft(ctx_b, &backend_b.buffer_type));
|
||||
ggml_backend_buffer_type_t bufts[2] = { &backend_a.buffer_type, &backend_b.buffer_type };
|
||||
|
||||
// assign buffer types manually to avoid extra complexity from backend scheduler
|
||||
ggml_set_output(x[8]);
|
||||
ggml_build_forward_expand(graph, x[8]);
|
||||
|
||||
GGML_ASSERT(graph->n_leafs == 5);
|
||||
int leaf_buffer_ids[5];
|
||||
leaf_buffer_ids[get_leaf_id(graph, "a0")] = 0;
|
||||
leaf_buffer_ids[get_leaf_id(graph, "a1")] = 0;
|
||||
leaf_buffer_ids[get_leaf_id(graph, "b0")] = 1;
|
||||
leaf_buffer_ids[get_leaf_id(graph, "b1")] = 1;
|
||||
leaf_buffer_ids[get_leaf_id(graph, "x0")] = 0;
|
||||
|
||||
GGML_ASSERT(graph->n_nodes == 8);
|
||||
int node_buffer_ids[8];
|
||||
node_buffer_ids[get_node_id(graph, "x1")] = 0;
|
||||
node_buffer_ids[get_node_id(graph, "x2")] = 0;
|
||||
node_buffer_ids[get_node_id(graph, "x3")] = 1;
|
||||
node_buffer_ids[get_node_id(graph, "x4")] = 1;
|
||||
node_buffer_ids[get_node_id(graph, "x5")] = 1;
|
||||
node_buffer_ids[get_node_id(graph, "x6")] = 1;
|
||||
node_buffer_ids[get_node_id(graph, "x7")] = 0;
|
||||
node_buffer_ids[get_node_id(graph, "x8")] = 0;
|
||||
|
||||
ggml_gallocr_ptr galloc(ggml_gallocr_new_n(bufts, 2));
|
||||
ggml_gallocr_reserve_n(galloc.get(), graph, node_buffer_ids, leaf_buffer_ids);
|
||||
ggml_gallocr_alloc_graph(galloc.get(), graph);
|
||||
|
||||
check_all_allocated(graph);
|
||||
check_no_overlap(graph);
|
||||
check_max_size(ctx);
|
||||
GGML_ASSERT(backend_a.context->allocated_total() <= 32 + 32 + 24);
|
||||
GGML_ASSERT(backend_b.context->allocated_total() <= 32 + 24);
|
||||
}
|
||||
|
||||
static void test_buffer_size_zero() {
|
||||
dummy_backend backend_a = dummy_backend_init(SIZE_MAX);
|
||||
dummy_backend backend_b = dummy_backend_init(SIZE_MAX);
|
||||
auto [ctx, graph, ctx_ptr] = make_context();
|
||||
|
||||
ggml_tensor * x[2];
|
||||
x[0] = make_input_with_size(ctx, 16);
|
||||
x[1] = ggml_scale(ctx, x[0], 2.0f);
|
||||
|
||||
ggml_set_output(x[1]);
|
||||
ggml_build_forward_expand(graph, x[1]);
|
||||
|
||||
int leaf_buffer_ids[1] = { 0 };
|
||||
int node_buffer_ids[1] = { 0 };
|
||||
|
||||
ggml_backend_buffer_type_t bufts[2] = { &backend_a.buffer_type, &backend_b.buffer_type };
|
||||
ggml_gallocr_ptr galloc = ggml_gallocr_ptr(ggml_gallocr_new_n(bufts, 2));
|
||||
bool res1 = ggml_gallocr_reserve_n(galloc.get(), graph, node_buffer_ids, leaf_buffer_ids);
|
||||
bool res2 = ggml_gallocr_alloc_graph(galloc.get(), graph);
|
||||
GGML_ASSERT(res1 && res2);
|
||||
|
||||
check_all_allocated(graph);
|
||||
GGML_ASSERT(backend_a.context->allocated_total() == 16);
|
||||
GGML_ASSERT(backend_b.context->allocated_total() == 0);
|
||||
}
|
||||
|
||||
static void run(const char * name, void (*f)()) {
|
||||
printf("%s ", name);
|
||||
fflush(stdout);
|
||||
f();
|
||||
printf("PASSED\n");
|
||||
}
|
||||
|
||||
int main() {
|
||||
run("test_max_size_too_many_tensors", test_max_size_too_many_tensors);
|
||||
run("test_max_size_tensor_too_large", test_max_size_tensor_too_large);
|
||||
run("test_tensor_larger_than_max_size", test_tensor_larger_than_max_size);
|
||||
run("test_not_enough_chunks", test_not_enough_chunks);
|
||||
run("test_fill_leftover_space", test_fill_leftover_space);
|
||||
run("test_view_inplace", test_view_inplace);
|
||||
run("test_reuse_and_free", test_reuse_and_free);
|
||||
run("test_merge_free_block(32)", []() { test_merge_free_block(32); });
|
||||
run("test_merge_free_block(SIZE_MAX)", []() { test_merge_free_block(SIZE_MAX); });
|
||||
run("test_prefer_already_allocated_memory", test_prefer_already_allocated_memory);
|
||||
run("test_multiple_buffer_types", test_multiple_buffer_types);
|
||||
run("test_buffer_size_zero", test_buffer_size_zero);
|
||||
return 0;
|
||||
}
|
||||