devops: fix failing s390x docker build (#16918)
.devops/s390x.Dockerfile
@@ -24,8 +24,9 @@ RUN --mount=type=cache,target=/root/.ccache \
     -DCMAKE_C_COMPILER_LAUNCHER=ccache \
     -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
     -DLLAMA_BUILD_TESTS=OFF \
-    -DGGML_BACKEND_DL=OFF \
     -DGGML_NATIVE=OFF \
+    -DGGML_BACKEND_DL=ON \
+    -DGGML_CPU_ALL_VARIANTS=ON \
     -DGGML_BLAS=ON \
     -DGGML_BLAS_VENDOR=OpenBLAS && \
     cmake --build build --config Release -j $(nproc) && \
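For context on the flag change above: `GGML_BACKEND_DL=ON` builds the ggml backends as shared libraries that the binaries load at run time, and `GGML_CPU_ALL_VARIANTS=ON` builds one CPU backend per supported instruction-set level so the best match can be picked on the target machine. A minimal sketch of the equivalent local configure, assuming a llama.cpp checkout (the build directory name and job count are illustrative):

    # Configure llama.cpp with dynamically loaded backends and all CPU
    # variants, mirroring the flags the s390x image now sets.
    cmake -B build \
        -DGGML_NATIVE=OFF \
        -DGGML_BACKEND_DL=ON \
        -DGGML_CPU_ALL_VARIANTS=ON \
        -DGGML_BLAS=ON \
        -DGGML_BLAS_VENDOR=OpenBLAS
    cmake --build build --config Release -j "$(nproc)"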
@@ -103,6 +104,7 @@ FROM base AS light
 WORKDIR /llama.cpp/bin
 
 # Copy llama.cpp binaries and libraries
+COPY --from=collector /llama.cpp/bin/*.so /llama.cpp/bin
 COPY --from=collector /llama.cpp/bin/llama-cli /llama.cpp/bin
 
 ENTRYPOINT [ "/llama.cpp/bin/llama-cli" ]
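The added `COPY` line is what fixes the light image: with `GGML_BACKEND_DL=ON` the backends are separate `*.so` files that `llama-cli` loads at startup, so they must be shipped next to the binary. A hypothetical smoke test of the resulting image (the host model path is an assumption, not from this commit):

    # Hypothetical smoke test of the light image; /path/to/models is assumed.
    docker run --rm -v /path/to/models:/models \
        ghcr.io/ggml-org/llama.cpp:light \
        -m /models/model.gguf -p "Hello" -n 16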
@@ -116,6 +118,7 @@ ENV LLAMA_ARG_HOST=0.0.0.0
 WORKDIR /llama.cpp/bin
 
 # Copy llama.cpp binaries and libraries
+COPY --from=collector /llama.cpp/bin/*.so /llama.cpp/bin
 COPY --from=collector /llama.cpp/bin/llama-server /llama.cpp/bin
 
 EXPOSE 8080
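Same reasoning for the server image: `llama-server` needs the backend `*.so` files beside it or it cannot load any backend at startup. A hedged usage sketch (the model path is an assumption):

    # Hypothetical: serve a model on port 8080; the model path is assumed.
    docker run --rm -p 8080:8080 -v /path/to/models:/models \
        ghcr.io/ggml-org/llama.cpp:server \
        -m /models/model.gguf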
docs/docker.md
@@ -7,9 +7,9 @@
 ## Images
 We have three Docker images available for this project:
 
-1. `ghcr.io/ggml-org/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. (platforms: `linux/amd64`, `linux/arm64`)
-2. `ghcr.io/ggml-org/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`)
-3. `ghcr.io/ggml-org/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`)
+1. `ghcr.io/ggml-org/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. (platforms: `linux/amd64`, `linux/arm64`, `linux/s390x`)
+2. `ghcr.io/ggml-org/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`, `linux/s390x`)
+3. `ghcr.io/ggml-org/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`, `linux/s390x`)
 
 Additionally, there the following images, similar to the above:
 
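With `linux/s390x` added to the platform lists, the published images can be pulled for that architecture explicitly, for example (a sketch; on an s390x host the `--platform` flag should be unnecessary, since Docker selects the native platform by default):

    # Explicitly pull the s390x variant of the server image.
    docker pull --platform linux/s390x ghcr.io/ggml-org/llama.cpp:server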