# Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
# flake-parts module that builds llama.cpp variants for NVIDIA Jetson boards.
# Each board gets its own nixpkgs instantiation pinned to that board's single
# CUDA compute capability, so the resulting binaries are compiled for exactly
# one GPU architecture (forward compat is disabled below).
{ inputs, ... }:
{
  perSystem =
    {
      config,
      system,
      lib,
      pkgsCuda,
      ...
    }:
    {
      legacyPackages =
        let
          # CUDA compute capability per Jetson board; the attribute name
          # becomes the package-scope name under `legacyPackages`.
          caps.llamaPackagesXavier = "7.2";
          caps.llamaPackagesOrin = "8.7";
          caps.llamaPackagesTX2 = "6.2";
          caps.llamaPackagesNano = "5.3";

          # Instantiate nixpkgs configured for exactly one CUDA capability.
          # `allowUnfreePredicate` is inherited from the shared pkgsCuda
          # instance so unfree CUDA packages stay allowed consistently.
          pkgsFor =
            cap:
            import inputs.nixpkgs {
              inherit system;
              config = {
                cudaSupport = true;
                cudaCapabilities = [ cap ];
                cudaEnableForwardCompat = false;
                inherit (pkgsCuda.config) allowUnfreePredicate;
              };
            };
        in
        # One llama.cpp package scope per board, e.g.
        # legacyPackages.llamaPackagesOrin.llama-cpp.
        builtins.mapAttrs (name: cap: (pkgsFor cap).callPackage ./scope.nix { }) caps;

      # Convenience aliases; Jetson boards are aarch64, so only expose them
      # on aarch64-linux.
      packages = lib.optionalAttrs (system == "aarch64-linux") {
        jetson-xavier = config.legacyPackages.llamaPackagesXavier.llama-cpp;
        jetson-orin = config.legacyPackages.llamaPackagesOrin.llama-cpp;
        # Consistency fix: TX2 (capability 6.2) was declared in `caps` and its
        # scope is built, but no alias was exposed alongside the other boards.
        jetson-tx2 = config.legacyPackages.llamaPackagesTX2.llama-cpp;
        jetson-nano = config.legacyPackages.llamaPackagesNano.llama-cpp;
      };
    };
}