* server: add cURL support to `full.Dockerfile`
* server: add cURL support to `full-cuda.Dockerfile` and `server-cuda.Dockerfile`
* server: add cURL support to `full-rocm.Dockerfile` and `server-rocm.Dockerfile`
* server: add cURL support to `server-intel.Dockerfile`
* server: add cURL support to `server-vulkan.Dockerfile`
* fix typo in `server-vulkan.Dockerfile`

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
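For context: cURL support here means each image is built with the libcurl development headers installed and `LLAMA_CURL=1` set, so the resulting binaries can fetch a model over HTTP at startup. A minimal runtime sketch, where the image tag and model URL are illustrative assumptions and `--model-url` is assumed from llama.cpp's CLI of the same era, not part of this commit:

# Sketch: run a cURL-enabled server image and let it download the model itself.
# Image tag, URL, and the --model-url flag are assumptions for illustration.
docker run --gpus all -p 8080:8080 local/llama.cpp:server-cuda \
    --model-url https://example.com/models/ggml-model-q4_0.gguf \
    --host 0.0.0.0 --port 8080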
ARG UBUNTU_VERSION=22.04

# This needs to generally match the container host's environment.
ARG CUDA_VERSION=11.7.1

# Target the CUDA build image
ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

FROM ${BASE_CUDA_DEV_CONTAINER} as build

# Unless otherwise specified, we make a fat build.
ARG CUDA_DOCKER_ARCH=all

RUN apt-get update && \
    apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev

COPY requirements.txt   requirements.txt
COPY requirements       requirements

RUN pip install --upgrade pip setuptools wheel \
    && pip install -r requirements.txt

WORKDIR /app

COPY . .

# Set nvcc architecture
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable CUDA
ENV LLAMA_CUDA=1
# Enable cURL
ENV LLAMA_CURL=1

RUN make

ENTRYPOINT ["/app/.devops/tools.sh"]
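The file above is the CUDA "full" image, whose entrypoint `/app/.devops/tools.sh` dispatches to the bundled tools. A hedged build-and-run sketch, assuming the file lives at `.devops/full-cuda.Dockerfile` in the repository root; the image tag and model path are illustrative:

# Build the CUDA full image from the repository root
# (file path and tag are assumptions based on llama.cpp's layout).
docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile .
# Run with GPU access; tools.sh accepts subcommands such as --run.
docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda \
    --run -m /models/ggml-model-q4_0.gguf -p "Hello" -n 64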