diff --git a/.devops/nix/package.nix b/.devops/nix/package.nix
index 41748e89d5..a13996bd68 100644
--- a/.devops/nix/package.nix
+++ b/.devops/nix/package.nix
@@ -34,6 +34,7 @@
   rocmGpuTargets ? builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets,
   enableCurl ? true,
   useVulkan ? false,
+  useRpc ? false,
   llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake

   # It's necessary to consistently use backendStdenv when building with CUDA support,
@@ -175,6 +176,7 @@ effectiveStdenv.mkDerivation (finalAttrs: {
       (cmakeBool "GGML_METAL" useMetalKit)
       (cmakeBool "GGML_VULKAN" useVulkan)
       (cmakeBool "GGML_STATIC" enableStatic)
+      (cmakeBool "GGML_RPC" useRpc)
     ]
     ++ optionals useCuda [
       (
diff --git a/tools/rpc/CMakeLists.txt b/tools/rpc/CMakeLists.txt
index c2c7481486..20f114ad9b 100644
--- a/tools/rpc/CMakeLists.txt
+++ b/tools/rpc/CMakeLists.txt
@@ -2,3 +2,7 @@ set(TARGET rpc-server)
 add_executable(${TARGET} rpc-server.cpp)
 target_link_libraries(${TARGET} PRIVATE ggml)
 target_compile_features(${TARGET} PRIVATE cxx_std_17)
+
+if(LLAMA_TOOLS_INSTALL)
+    install(TARGETS ${TARGET} RUNTIME)
+endif()
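Usage note: the new `useRpc` option toggles the `GGML_RPC` CMake flag when building the Nix package, and the CMakeLists change makes `rpc-server` part of `cmake --install` output when `LLAMA_TOOLS_INSTALL` is enabled. Below is a minimal sketch of enabling the option from a consuming flake; the flake URL and attribute path are illustrative assumptions, only `useRpc` itself comes from this change.

    # flake.nix (sketch): build llama.cpp with the RPC backend enabled.
    {
      # Assumed input URL; point this at whatever llama.cpp checkout you consume.
      inputs.llama-cpp.url = "github:ggml-org/llama.cpp";

      outputs = { self, llama-cpp, ... }:
        let
          system = "x86_64-linux";
        in {
          # `.override { useRpc = true; }` flips the new cmakeBool "GGML_RPC" flag;
          # the `packages.${system}.default` path is an assumption about the flake layout.
          packages.${system}.llama-cpp-rpc =
            llama-cpp.packages.${system}.default.override { useRpc = true; };
        };
    }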