Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
ggml : add RPC backend

The RPC backend proxies all operations to a remote server which runs a regular backend (CPU, CUDA, Metal, etc.).

* set TCP_NODELAY
* add CI workflows
* Address review comments
* fix warning
* implement llama_max_devices() for RPC
* Address review comments
* Address review comments
* wrap sockfd into a struct
* implement get_alignment and get_max_size
* add get_device_memory
* fix warning
* win32 support
* add README
* readme : trim trailing whitespace
* Address review comments
* win32 fix
* Address review comments
* fix compile warnings on macos

71 lines · 1.9 KiB · C++
#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

#include "ggml-rpc.h"
#include <string>
#include <stdio.h>

static ggml_backend_t create_backend() {
    ggml_backend_t backend = NULL;
#ifdef GGML_USE_CUDA
    fprintf(stderr, "%s: using CUDA backend\n", __func__);
    backend = ggml_backend_cuda_init(0); // init device 0
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
    }
#elif defined(GGML_USE_METAL)
    fprintf(stderr, "%s: using Metal backend\n", __func__);
    backend = ggml_backend_metal_init();
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
    }
#endif

    // if there is no GPU backend, fall back to the CPU backend
    if (!backend) {
        fprintf(stderr, "%s: using CPU backend\n", __func__);
        backend = ggml_backend_cpu_init();
    }
    return backend;
}

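// note: the backend is chosen at compile time -- GGML_USE_CUDA / GGML_USE_METAL come from
// the build system, so one rpc-server binary serves exactly one backend and only falls
// back to CPU at runtime if GPU initialization fails. Other ggml backends could be added
// with the same pattern, e.g. (sketch only, not in this commit; ggml_backend_sycl_init()
// is ggml's SYCL entry point):
//
//   #elif defined(GGML_USE_SYCL)
//       fprintf(stderr, "%s: using SYCL backend\n", __func__);
//       backend = ggml_backend_sycl_init(0); // init device 0
//   #endif
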
// memory sizes advertised to RPC clients for this backend's device
static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
#ifdef GGML_USE_CUDA
    ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
#else
    // TODO: implement for other backends
    *free_mem = 1;
    *total_mem = 1;
#endif
}

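// The TODO above means non-CUDA builds advertise only 1 byte of free/total memory.
// A possible host-RAM fallback for the CPU path (sketch only, not part of this commit;
// assumes a POSIX system where sysconf() exposes _SC_PHYS_PAGES, e.g. Linux):
//
//   #include <unistd.h>
//
//   static void get_cpu_memory(size_t * free_mem, size_t * total_mem) {
//       long pages     = sysconf(_SC_PHYS_PAGES);
//       long page_size = sysconf(_SC_PAGE_SIZE);
//       size_t total   = (pages > 0 && page_size > 0) ? (size_t) pages * (size_t) page_size : 1;
//       *total_mem = total;
//       *free_mem  = total; // "free" is hard to define portably; report total as an upper bound
//   }
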
int main(int argc, char * argv[]) {
    if (argc < 3) {
        fprintf(stderr, "Usage: %s <host> <port>\n", argv[0]);
        return 1;
    }
    const char * host = argv[1];
    int port = 0;
    try {
        port = std::stoi(argv[2]);
    } catch (const std::exception &) {
        // std::stoi throws on non-numeric input; report it instead of crashing
        fprintf(stderr, "Invalid port number: %s\n", argv[2]);
        return 1;
    }
    if (port <= 0 || port > 65535) {
        fprintf(stderr, "Invalid port number: %d\n", port);
        return 1;
    }
    ggml_backend_t backend = create_backend();
    if (!backend) {
        fprintf(stderr, "Failed to create backend\n");
        return 1;
    }
    printf("Starting RPC server on %s:%d\n", host, port);
    size_t free_mem, total_mem;
    get_backend_memory(&free_mem, &total_mem);
    std::string endpoint = std::string(host) + ":" + std::to_string(port);
    // serve requests on "<host>:<port>"; start_rpc_server() blocks while the server runs
    start_rpc_server(backend, endpoint.c_str(), free_mem, total_mem);
    ggml_backend_free(backend);
    return 0;
}
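The commit message above describes the client side: a ggml program initializes the RPC backend with the server's endpoint, and every backend operation is then proxied over the socket. A minimal client sketch, assuming the ggml_backend_rpc_init() and ggml_backend_rpc_get_device_memory() entry points this commit adds to ggml-rpc.h (the address below is an arbitrary example; use the host and port passed to rpc-server):

// rpc client sketch (illustration only, not part of this commit)
#include "ggml-rpc.h"
#include <stdio.h>

int main() {
    const char * endpoint = "127.0.0.1:50052"; // example address

    // all ggml backend operations on 'backend' are forwarded to the remote server
    ggml_backend_t backend = ggml_backend_rpc_init(endpoint);
    if (!backend) {
        fprintf(stderr, "failed to connect to RPC server at %s\n", endpoint);
        return 1;
    }

    // reports the free/total values the server passed to start_rpc_server()
    size_t free_mem, total_mem;
    ggml_backend_rpc_get_device_memory(endpoint, &free_mem, &total_mem);
    printf("remote memory: %zu bytes free / %zu bytes total\n", free_mem, total_mem);

    ggml_backend_free(backend);
    return 0;
}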