mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	 f3f65429c4
			
		
	
	f3f65429c4
	
	
	
		
			
* scripts : update sync [no ci]
* files : relocate [no ci]
* ci : disable kompute build [no ci]
* cmake : fixes [no ci]
* server : fix mingw build ggml-ci
* cmake : minor [no ci]
* cmake : link math library [no ci]
* cmake : build normal ggml library (not object library) [no ci]
* cmake : fix kompute build ggml-ci
* make,cmake : fix LLAMA_CUDA + replace GGML_CDEF_PRIVATE ggml-ci
* move public backend headers to the public include directory (#8122)
  * move public backend headers to the public include directory
  * nix test
  * spm : fix metal header
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* scripts : fix sync paths [no ci]
* scripts : sync ggml-blas.h [no ci]
Co-authored-by: slaren <slarengh@gmail.com>
		
			
				
	
	
		
			66 lines
		
	
	
		
			1.8 KiB
		
	
	
	
		
			CMake
		
	
	
	
	
	
			
		
		
	
	
			66 lines
		
	
	
		
			1.8 KiB
		
	
	
	
		
			CMake
		
	
	
	
	
	
# Llama package configuration template, processed by configure_package_config_file().
# Tokens of the form @VAR@ are substituted at generate time with the values of the
# build that produced this package.

set(LLAMA_VERSION      @LLAMA_INSTALL_VERSION@)
set(LLAMA_BUILD_COMMIT @LLAMA_BUILD_COMMIT@)
set(LLAMA_BUILD_NUMBER @LLAMA_BUILD_NUMBER@)
set(LLAMA_SHARED_LIB   @BUILD_SHARED_LIBS@)

# Backend options recorded from the originating build; they decide which
# dependencies must be re-located on the consuming machine below.
set(GGML_BLAS       @GGML_BLAS@)
set(GGML_CUDA       @GGML_CUDA@)
set(GGML_METAL      @GGML_METAL@)
set(GGML_HIPBLAS    @GGML_HIPBLAS@)
set(GGML_ACCELERATE @GGML_ACCELERATE@)

@PACKAGE_INIT@

# set_and_check() is provided by @PACKAGE_INIT@ and fails configuration
# if the installed path no longer exists.
set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@")
set_and_check(LLAMA_LIB_DIR     "@PACKAGE_LLAMA_LIB_INSTALL_DIR@")
set_and_check(LLAMA_BIN_DIR     "@PACKAGE_LLAMA_BIN_INSTALL_DIR@")

# Ensure transitive dependencies are satisfied

find_package(Threads REQUIRED)

if (APPLE AND GGML_ACCELERATE)
    find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED)
endif()

if (GGML_BLAS)
    find_package(BLAS REQUIRED)
endif()

if (GGML_CUDA)
    find_package(CUDAToolkit REQUIRED)
endif()

if (GGML_METAL)
    find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
    find_library(METAL_FRAMEWORK Metal REQUIRED)
    find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
endif()

if (GGML_HIPBLAS)
    find_package(hip REQUIRED)
    find_package(hipblas REQUIRED)
    find_package(rocblas REQUIRED)
endif()

# Locate the installed llama library itself, preferring the package's own lib dir.
find_library(llama_LIBRARY llama
    REQUIRED
    HINTS ${LLAMA_LIB_DIR})

# Link dependencies and compile definitions that consumers must inherit,
# as captured from the originating build.
set(_llama_link_deps "Threads::Threads" "@LLAMA_EXTRA_LIBS@")
set(_llama_transient_defines "@LLAMA_TRANSIENT_DEFINES@")

# Expose the installed library as an imported target carrying its full
# usage requirements (includes, link deps, defines).
add_library(llama UNKNOWN IMPORTED)

set_target_properties(llama
    PROPERTIES
        INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}"
        INTERFACE_LINK_LIBRARIES "${_llama_link_deps}"
        INTERFACE_COMPILE_DEFINITIONS "${_llama_transient_defines}"
        IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
        IMPORTED_LOCATION "${llama_LIBRARY}"
        INTERFACE_COMPILE_FEATURES cxx_std_11
        POSITION_INDEPENDENT_CODE ON )

check_required_components(Llama)