mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	llama : use cmake for swift build
This commit is contained in:
		
							
								
								
									
										72
									
								
								.github/workflows/build.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										72
									
								
								.github/workflows/build.yml
									
									
									
									
										vendored
									
									
								
							| @@ -552,35 +552,49 @@ jobs: | |||||||
|             -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml |             -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml | ||||||
|           cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO |           cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO | ||||||
|  |  | ||||||
| # TODO: tmp disabled. see for possible re-enable: |   macOS-latest-swift: | ||||||
| #       https://github.com/ggerganov/llama.cpp/pull/10525 |     runs-on: macos-latest | ||||||
| #  macOS-latest-swift: |  | ||||||
| #    runs-on: macos-latest |     strategy: | ||||||
| # |       matrix: | ||||||
| #    strategy: |         destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS'] | ||||||
| #      matrix: |  | ||||||
| #        destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS'] |     steps: | ||||||
| # |       - name: Clone | ||||||
| #    steps: |         id: checkout | ||||||
| #      - name: Clone |         uses: actions/checkout@v4 | ||||||
| #        id: checkout |  | ||||||
| #        uses: actions/checkout@v4 |       - name: Dependencies | ||||||
| # |         id: depends | ||||||
| #      - name: Dependencies |         continue-on-error: true | ||||||
| #        id: depends |         run: | | ||||||
| #        continue-on-error: true |           brew update | ||||||
| #        run: | |  | ||||||
| #          brew update |       - name: Build llama.cpp with CMake | ||||||
| # |         id: cmake_build | ||||||
| #      - name: xcodebuild for swift package |         run: | | ||||||
| #        id: xcodebuild |           sysctl -a | ||||||
| #        run: | |           mkdir build | ||||||
| #          xcodebuild -scheme llama -destination "${{ matrix.destination }}" |           cd build | ||||||
| # |           cmake -G Xcode .. \ | ||||||
| #      - name: Build Swift Example |             -DGGML_METAL_USE_BF16=ON \ | ||||||
| #        id: make_build_swift_example |             -DGGML_METAL_EMBED_LIBRARY=ON \ | ||||||
| #        run: | |             -DLLAMA_BUILD_EXAMPLES=OFF \ | ||||||
| #            make swift |             -DLLAMA_BUILD_TESTS=OFF \ | ||||||
|  |             -DLLAMA_BUILD_SERVER=OFF \ | ||||||
|  |             -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" | ||||||
|  |           cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) | ||||||
|  |           sudo cmake --install . --config Release | ||||||
|  |  | ||||||
|  |       - name: xcodebuild for swift package | ||||||
|  |         id: xcodebuild | ||||||
|  |         run: | | ||||||
|  |           xcodebuild -scheme llama-Package -destination "${{ matrix.destination }}" | ||||||
|  |  | ||||||
|  |       - name: Build Swift Example | ||||||
|  |         id: make_build_swift_example | ||||||
|  |         run: | | ||||||
|  |             make swift | ||||||
|  |  | ||||||
|   windows-msys2: |   windows-msys2: | ||||||
|     runs-on: windows-latest |     runs-on: windows-latest | ||||||
|   | |||||||
| @@ -2,60 +2,6 @@ | |||||||
|  |  | ||||||
| import PackageDescription | import PackageDescription | ||||||
|  |  | ||||||
| var sources = [ |  | ||||||
|     "src/llama.cpp", |  | ||||||
|     "src/llama-vocab.cpp", |  | ||||||
|     "src/llama-grammar.cpp", |  | ||||||
|     "src/llama-sampling.cpp", |  | ||||||
|     "src/unicode.cpp", |  | ||||||
|     "src/unicode-data.cpp", |  | ||||||
|     "ggml/src/ggml.c", |  | ||||||
|     "ggml/src/ggml-alloc.c", |  | ||||||
|     "ggml/src/ggml-backend.cpp", |  | ||||||
|     "ggml/src/ggml-backend-reg.cpp", |  | ||||||
|     "ggml/src/ggml-cpu/ggml-cpu.c", |  | ||||||
|     "ggml/src/ggml-cpu/ggml-cpu.cpp", |  | ||||||
|     "ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp", |  | ||||||
|     "ggml/src/ggml-cpu/ggml-cpu-hbm.cpp", |  | ||||||
|     "ggml/src/ggml-cpu/ggml-cpu-quants.c", |  | ||||||
|     "ggml/src/ggml-cpu/ggml-cpu-traits.cpp", |  | ||||||
|     "ggml/src/ggml-threading.cpp", |  | ||||||
|     "ggml/src/ggml-quants.c", |  | ||||||
| ] |  | ||||||
|  |  | ||||||
| var resources: [Resource] = [] |  | ||||||
| var linkerSettings: [LinkerSetting] = [] |  | ||||||
| var cSettings: [CSetting] =  [ |  | ||||||
|     .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]), |  | ||||||
|     .unsafeFlags(["-fno-objc-arc"]), |  | ||||||
|     .headerSearchPath("ggml/src"), |  | ||||||
|     .headerSearchPath("ggml/src/ggml-cpu"), |  | ||||||
|     // NOTE: NEW_LAPACK will require iOS version 16.4+ |  | ||||||
|     // We should consider adding this in the future when we drop support for iOS 14 |  | ||||||
|     // (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc) |  | ||||||
|     // .define("ACCELERATE_NEW_LAPACK"), |  | ||||||
|     // .define("ACCELERATE_LAPACK_ILP64") |  | ||||||
|     .define("GGML_USE_CPU"), |  | ||||||
| ] |  | ||||||
|  |  | ||||||
|  |  | ||||||
| #if canImport(Darwin) |  | ||||||
| sources.append("ggml/src/ggml-common.h") |  | ||||||
| sources.append("ggml/src/ggml-metal/ggml-metal.m") |  | ||||||
| resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal")) |  | ||||||
| linkerSettings.append(.linkedFramework("Accelerate")) |  | ||||||
| cSettings.append( |  | ||||||
|     contentsOf: [ |  | ||||||
|         .define("GGML_USE_ACCELERATE"), |  | ||||||
|         .define("GGML_USE_METAL"), |  | ||||||
|     ] |  | ||||||
| ) |  | ||||||
| #endif |  | ||||||
|  |  | ||||||
| #if os(Linux) |  | ||||||
|     cSettings.append(.define("_GNU_SOURCE")) |  | ||||||
| #endif |  | ||||||
|  |  | ||||||
| let package = Package( | let package = Package( | ||||||
|     name: "llama", |     name: "llama", | ||||||
|     platforms: [ |     platforms: [ | ||||||
| @@ -68,26 +14,6 @@ let package = Package( | |||||||
|         .library(name: "llama", targets: ["llama"]), |         .library(name: "llama", targets: ["llama"]), | ||||||
|     ], |     ], | ||||||
|     targets: [ |     targets: [ | ||||||
|         .target( |         .systemLibrary(name: "llama", pkgConfig: "llama"), | ||||||
|             name: "llama", |     ] | ||||||
|             path: ".", |  | ||||||
|             exclude: [ |  | ||||||
|                "build", |  | ||||||
|                "cmake", |  | ||||||
|                "examples", |  | ||||||
|                "scripts", |  | ||||||
|                "models", |  | ||||||
|                "tests", |  | ||||||
|                "CMakeLists.txt", |  | ||||||
|                "Makefile", |  | ||||||
|                "ggml/src/ggml-metal-embed.metal" |  | ||||||
|             ], |  | ||||||
|             sources: sources, |  | ||||||
|             resources: resources, |  | ||||||
|             publicHeadersPath: "spm-headers", |  | ||||||
|             cSettings: cSettings, |  | ||||||
|             linkerSettings: linkerSettings |  | ||||||
|         ) |  | ||||||
|     ], |  | ||||||
|     cxxLanguageStandard: .cxx17 |  | ||||||
| ) | ) | ||||||
|   | |||||||
							
								
								
									
										4
									
								
								Sources/llama/llama.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										4
									
								
								Sources/llama/llama.h
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,4 @@ | |||||||
|  | #pragma once | ||||||
|  |  | ||||||
|  | #include <llama.h> | ||||||
|  |  | ||||||
							
								
								
									
										5
									
								
								Sources/llama/module.modulemap
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								Sources/llama/module.modulemap
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,5 @@ | |||||||
|  | module llama [system] { | ||||||
|  |     header "llama.h" | ||||||
|  |     link "llama" | ||||||
|  |     export * | ||||||
|  | } | ||||||
| @@ -6,5 +6,5 @@ includedir=${prefix}/include | |||||||
| Name: llama | Name: llama | ||||||
| Description: Port of Facebook's LLaMA model in C/C++ | Description: Port of Facebook's LLaMA model in C/C++ | ||||||
| Version: @PROJECT_VERSION@ | Version: @PROJECT_VERSION@ | ||||||
| Libs: -L${libdir} -lllama | Libs: -L${libdir} -lggml  -lggml-base -lllama | ||||||
| Cflags: -I${includedir} | Cflags: -I${includedir} | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user
	 slaren
					slaren