mirror of https://github.com/ggml-org/llama.cpp.git
* rpc : report actual free memory

  Start reporting the free memory on every device instead of using fixed values.
  Now llama-cli users can get a nice memory breakdown when using RPC devices.

* drop --mem in rpc-server
#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif

#define RPC_PROTO_MAJOR_VERSION 3
#define RPC_PROTO_MINOR_VERSION 0
#define RPC_PROTO_PATCH_VERSION 0
#define GGML_RPC_MAX_SERVERS 16

// backend API

// create a backend connected to device index `device` on the RPC server at `endpoint` ("host:port")
GGML_BACKEND_API ggml_backend_t ggml_backend_rpc_init(const char * endpoint, uint32_t device);
// check whether a backend is an RPC backend
GGML_BACKEND_API bool ggml_backend_is_rpc(ggml_backend_t backend);

// buffer type used to allocate tensors on the given remote device
GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint, uint32_t device);

// query the free and total memory (in bytes) of a remote device
GGML_BACKEND_API void ggml_backend_rpc_get_device_memory(const char * endpoint, uint32_t device, size_t * free, size_t * total);

// start an RPC server on `endpoint`, exposing `n_devices` local devices to clients
GGML_BACKEND_API void ggml_backend_rpc_start_server(const char * endpoint, const char * cache_dir,
                                                    size_t n_threads, size_t n_devices, ggml_backend_dev_t * devices);

// backend registry entry for the RPC backend
GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_reg(void);
// register a remote RPC server (and its devices) with the backend registry
GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_add_server(const char * endpoint);

#ifdef __cplusplus
}
#endif
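
Usage sketch (not part of the header above): a client can query a remote device's memory and then create a backend bound to that device. The endpoint "127.0.0.1:50052" is an assumption about where an rpc-server happens to be listening; the rest uses only the declarations above plus the core ggml-backend API.

#include <stdio.h>

#include "ggml-rpc.h"

int main(void) {
    // assumption: an rpc-server is reachable at this host:port
    const char * endpoint = "127.0.0.1:50052";

    // per the commit described at the top, `free_mem` now reflects the device's
    // actual free memory rather than a fixed, preconfigured value
    size_t free_mem  = 0;
    size_t total_mem = 0;
    ggml_backend_rpc_get_device_memory(endpoint, /*device =*/ 0, &free_mem, &total_mem);
    printf("device 0: %zu bytes free / %zu bytes total\n", free_mem, total_mem);

    // create a backend bound to that remote device and verify its type
    ggml_backend_t backend = ggml_backend_rpc_init(endpoint, /*device =*/ 0);
    if (backend && ggml_backend_is_rpc(backend)) {
        // ... allocate buffers and run graphs through the usual ggml-backend API ...
        ggml_backend_free(backend);
    }
    return 0;
}

It is this per-device free/total report that lets llama-cli print a memory breakdown for RPC devices.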
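
A matching server-side sketch, under the same caveats: the bind address "0.0.0.0:50052" and `n_threads = 4` are arbitrary assumptions, and passing NULL for `cache_dir` is assumed to disable the server-side cache, mirroring how rpc-server behaves when no cache directory is given. Note that the commit above also dropped rpc-server's --mem flag, since free memory is now reported by each device directly.

#include <stdlib.h>

#include "ggml-backend.h"
#include "ggml-rpc.h"

int main(void) {
    // make sure all available backends are loaded/registered
    ggml_backend_load_all();

    // collect every device registered with ggml-backend on this machine
    size_t n_devices = ggml_backend_dev_count();
    ggml_backend_dev_t * devices = malloc(n_devices * sizeof(*devices));
    if (!devices) {
        return 1;
    }
    for (size_t i = 0; i < n_devices; i++) {
        devices[i] = ggml_backend_dev_get(i);
    }

    // blocks and serves client requests on the given endpoint
    ggml_backend_rpc_start_server("0.0.0.0:50052", /*cache_dir =*/ NULL,
                                  /*n_threads =*/ 4, n_devices, devices);

    free(devices);
    return 0;
}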