Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-11-03 09:22:01 +00:00
			
		
		
		
* llama : move sampling code into llama-sampling (ggml-ci)
* llama : move grammar code into llama-grammar (ggml-ci)
* cont (ggml-ci)
* cont : pre-fetch rules
* cont (ggml-ci)
* llama : deprecate llama_sample_grammar
* llama : move tokenizers into llama-vocab (ggml-ci)
* make : update llama.cpp deps [no ci]
* llama : redirect external API to internal APIs (ggml-ci)
* llama : suffix the internal APIs with "_impl" (ggml-ci)
* llama : clean-up
		
			
				
	
	
		
			80 lines
		
	
	
		
			2.0 KiB
		
	
	
	
		
			Swift
		
	
	
	
	
	
			
		
		
	
	
			80 lines
		
	
	
		
			2.0 KiB
		
	
	
	
		
			Swift
		
	
	
	
	
	
// swift-tools-version:5.5

import PackageDescription

// C/C++ translation units built on every platform.
var sources = [
    "src/llama.cpp",
    "src/llama-vocab.cpp",
    "src/llama-grammar.cpp",
    "src/llama-sampling.cpp",
    "src/unicode.cpp",
    "src/unicode-data.cpp",
    "ggml/src/ggml.c",
    "ggml/src/ggml-alloc.c",
    "ggml/src/ggml-backend.c",
    "ggml/src/ggml-quants.c",
    "ggml/src/ggml-aarch64.c",
]

// Platform-specific additions are collected below before the Package is built.
var resources: [Resource] = []
var linkerSettings: [LinkerSetting] = []

// Release-style compiler flags; ARC is disabled for the Objective-C sources.
var cSettings: [CSetting] = [
    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
    .unsafeFlags(["-fno-objc-arc"]),
    // NOTE: NEW_LAPACK requires iOS 16.4+.
    // Consider enabling it once support for iOS 14 is dropped
    // (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
    // .define("ACCELERATE_NEW_LAPACK"),
    // .define("ACCELERATE_LAPACK_ILP64")
]

#if canImport(Darwin)
// Apple platforms: compile the Metal backend, bundle its shader source,
// and link against the Accelerate framework.
sources += ["ggml/src/ggml-metal.m"]
resources += [.process("ggml/src/ggml-metal.metal")]
linkerSettings += [.linkedFramework("Accelerate")]
cSettings += [
    .define("GGML_USE_ACCELERATE"),
    .define("GGML_USE_METAL"),
]
#endif

#if os(Linux)
// Needed for GNU extensions (e.g. pthread affinity helpers) used by ggml.
cSettings += [.define("_GNU_SOURCE")]
#endif

let package = Package(
    name: "llama",
    platforms: [
        .macOS(.v12),
        .iOS(.v14),
        .watchOS(.v4),
        .tvOS(.v14),
    ],
    products: [
        .library(name: "llama", targets: ["llama"]),
    ],
    targets: [
        .target(
            name: "llama",
            path: ".",
            // The repo root is the target path, so everything that is not
            // part of the library build must be excluded explicitly.
            exclude: [
                "cmake",
                "examples",
                "scripts",
                "models",
                "tests",
                "CMakeLists.txt",
                "Makefile",
            ],
            sources: sources,
            resources: resources,
            publicHeadersPath: "spm-headers",
            cSettings: cSettings,
            linkerSettings: linkerSettings
        ),
    ],
    cxxLanguageStandard: .cxx11
)