ARG UBUNTU_VERSION=22.04
# This needs to generally match the container host's environment.
ARG CUDA_VERSION=11.7.1
# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
# Target the CUDA runtime image
ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} as build

# Unless otherwise specified, we make a fat build.
ARG CUDA_DOCKER_ARCH=all

RUN apt-get update && \
    apt-get install -y build-essential

WORKDIR /app

COPY . .

# Set nvcc architecture
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable cuBLAS
ENV LLAMA_CUBLAS=1

RUN make

FROM ${BASE_CUDA_RUN_CONTAINER} as runtime

COPY --from=build /app/main /main

ENTRYPOINT [ "/main" ]
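
# Usage sketch (not part of the file above): the commands below assume this
# Dockerfile lives at .devops/main-cuda.Dockerfile in the llama.cpp source tree;
# the image tag, CUDA architecture, model path, and prompt are illustrative
# placeholders, not values mandated by the project.
#
#   # Build from the repository root. CUDA_DOCKER_ARCH can optionally be pinned
#   # to a single architecture instead of the default fat build:
#   docker build -t local/llama.cpp:main-cuda \
#       --build-arg CUDA_DOCKER_ARCH=compute_86 \
#       -f .devops/main-cuda.Dockerfile .
#
#   # Run with GPU access, mounting a host directory that contains the model;
#   # everything after the image name is passed to the /main entrypoint:
#   docker run --gpus all -v /path/to/models:/models local/llama.cpp:main-cuda \
#       -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512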