Mirror of https://github.com/ggml-org/llama.cpp.git
	[SYCL] Update rpc-server.cpp to include SYCL backend (#7682)
* Update rpc-server.cpp to include SYCL backend

  Draft PR to address inclusion of SYCL backend for RPC server

* Update rpc-server.cpp
rpc-server.cpp:

@@ -6,6 +6,10 @@
 #include "ggml-metal.h"
 #endif
 
+#ifdef GGML_USE_SYCL
+#include "ggml-sycl.h"
+#endif
+
 #include "ggml-rpc.h"
 #ifdef _WIN32
 #  include <windows.h>
@@ -79,6 +83,12 @@ static ggml_backend_t create_backend() {
     if (!backend) {
         fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
     }
+#elif GGML_USE_SYCL
+    fprintf(stderr, "%s: using SYCL backend\n", __func__);
+    backend = ggml_backend_sycl_init(0); // init device 0
+    if (!backend) {
+        fprintf(stderr, "%s: ggml_backend_sycl_init() failed\n", __func__);
+    }
 #endif
 
     // if there aren't GPU Backends fallback to CPU backend
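For context, here is a minimal sketch of the backend-selection pattern that this diff extends: the SYCL branch is compiled in only when GGML_USE_SYCL is defined, and the server falls back to the CPU backend when no GPU backend could be initialized (the comment at the end of the hunk above). This is an illustrative trim of create_backend(), not the full rpc-server.cpp; the CUDA and Metal branches and the rest of the server wiring are omitted.

// Sketch: compile-time backend selection with CPU fallback (SYCL branch only).
#include <cstdio>

#include "ggml.h"
#include "ggml-backend.h"
#ifdef GGML_USE_SYCL
#include "ggml-sycl.h"
#endif

static ggml_backend_t create_backend() {
    ggml_backend_t backend = NULL;
#ifdef GGML_USE_SYCL
    fprintf(stderr, "%s: using SYCL backend\n", __func__);
    backend = ggml_backend_sycl_init(0); // init device 0; other devices are not handled here
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_sycl_init() failed\n", __func__);
    }
#endif
    // if no GPU backend was built in (or its init failed), fall back to the CPU backend
    if (!backend) {
        fprintf(stderr, "%s: using CPU backend\n", __func__);
        backend = ggml_backend_cpu_init();
    }
    return backend;
}

Because the SYCL path only replaces the GPU branch, a build without GGML_USE_SYCL still produces a working rpc-server that serves the CPU backend.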
Author: nickp27