mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-03 09:22:01 +00:00 
			
		
		
		
	Exposes a few attributes demonstrating how to build [singularity](https://docs.sylabs.io/guides/latest/user-guide/)/[apptainer](https://apptainer.org/) and Docker images re-using llama.cpp's Nix expression. Built locally on `x86_64-linux` with `nix build github:someoneserge/llama.cpp/feat/nix/images#llamaPackages.{docker,docker-min,sif,llama-cpp}`; the builds are fast and effective.
		
			
				
	
	
		
			38 lines
		
	
	
		
			850 B
		
	
	
	
		
			Nix
		
	
	
	
	
	
			
		
		
	
	
			38 lines
		
	
	
		
			850 B
		
	
	
	
		
			Nix
		
	
	
	
	
	
{
  lib,
  dockerTools,
  buildEnv,
  llama-cpp,
  interactive ? true,
  coreutils,
}:

# Produces a tarball suitable for `docker load`:
#
# $ nix build .#llamaPackages.docker
# $ docker load < result

# Background reading and alternative approaches:
# - https://nixos.org/manual/nixpkgs/unstable/#ssec-pkgs-dockerTools-buildLayeredImage
# - https://discourse.nixos.org/t/a-faster-dockertools-buildimage-prototype/16922
# - https://nixery.dev/

# Approximate (compressed) sizes, at the time of writing, are:
#
# .#llamaPackages.docker: 125M;
# .#llamaPackagesCuda.docker: 537M;
# .#legacyPackages.aarch64-linux.llamaPackagesXavier.docker: 415M.

let
  # Extra contents included only when `interactive` is set: a shell,
  # basic coreutils, and CA certificates for a usable interactive container.
  interactiveTools = lib.optionals interactive [
    coreutils
    dockerTools.binSh
    dockerTools.caCertificates
  ];
in
dockerTools.buildLayeredImage {
  name = llama-cpp.pname;
  tag = "latest";

  contents = [ llama-cpp ] ++ interactiveTools;
}
 |