mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	build : link against build info instead of compiling against it (#3879)
* cmake : fix build when .git does not exist
* cmake : simplify BUILD_INFO target
* cmake : add missing dependencies on BUILD_INFO
* build : link against build info instead of compiling against it
* zig : make build info a .cpp source instead of a header

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>

* cmake : revert change to CMP0115

---------

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>
This commit is contained in:
		| @@ -1,9 +1,6 @@ | ||||
| set(TARGET benchmark) | ||||
| add_executable(${TARGET} benchmark-matmult.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_include_directories(${TARGET} PRIVATE ../../common) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -1,4 +1,3 @@ | ||||
| #include "build-info.h" | ||||
| #include "common.h" | ||||
| #include "ggml.h" | ||||
|  | ||||
|   | ||||
| @@ -3,6 +3,3 @@ add_executable(${TARGET} embedding.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -1,4 +1,3 @@ | ||||
| #include "build-info.h" | ||||
| #include "common.h" | ||||
| #include "llama.h" | ||||
|  | ||||
|   | ||||
| @@ -3,6 +3,3 @@ add_executable(${TARGET} infill.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|     add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -2,7 +2,6 @@ | ||||
|  | ||||
| #include "console.h" | ||||
| #include "llama.h" | ||||
| #include "build-info.h" | ||||
| #include "grammar-parser.h" | ||||
|  | ||||
| #include <cassert> | ||||
| @@ -184,8 +183,8 @@ int main(int argc, char ** argv) { | ||||
|         LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale); | ||||
|     } | ||||
|  | ||||
|     LOG_TEE("%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); | ||||
|     LOG_TEE("%s: built with %s for %s\n", __func__, BUILD_COMPILER, BUILD_TARGET); | ||||
|     LOG_TEE("%s: build = %d (%s)\n",      __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT); | ||||
|     LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); | ||||
|  | ||||
|     if (params.seed == LLAMA_DEFAULT_SEED) { | ||||
|         params.seed = time(NULL); | ||||
|   | ||||
| @@ -3,6 +3,3 @@ add_executable(${TARGET} llama-bench.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -19,7 +19,6 @@ | ||||
| #include "ggml.h" | ||||
| #include "llama.h" | ||||
| #include "common.h" | ||||
| #include "build-info.h" | ||||
| #include "ggml-cuda.h" | ||||
|  | ||||
| // utils | ||||
| @@ -641,8 +640,8 @@ struct test { | ||||
|     } | ||||
| }; | ||||
|  | ||||
| const std::string test::build_commit = BUILD_COMMIT; | ||||
| const int         test::build_number = BUILD_NUMBER; | ||||
| const std::string test::build_commit = LLAMA_COMMIT; | ||||
| const int         test::build_number = LLAMA_BUILD_NUMBER; | ||||
| const bool        test::cuda         = !!ggml_cpu_has_cublas(); | ||||
| const bool        test::opencl       = !!ggml_cpu_has_clblast(); | ||||
| const bool        test::metal        = !!ggml_cpu_has_metal(); | ||||
|   | ||||
| @@ -5,9 +5,6 @@ target_link_libraries(${TARGET} PRIVATE common ggml ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if (NOT MSVC) | ||||
|     target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h | ||||
|     endif() | ||||
| if(TARGET BUILD_INFO) | ||||
|     add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|  | ||||
| set(TARGET llava) | ||||
| @@ -15,6 +12,3 @@ add_executable(${TARGET} llava.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE common llama clip ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|     add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -3,6 +3,3 @@ add_executable(${TARGET} main.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -2,7 +2,6 @@ | ||||
|  | ||||
| #include "console.h" | ||||
| #include "llama.h" | ||||
| #include "build-info.h" | ||||
|  | ||||
| #include <cassert> | ||||
| #include <cinttypes> | ||||
| @@ -153,8 +152,8 @@ int main(int argc, char ** argv) { | ||||
|         LOG_TEE("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale); | ||||
|     } | ||||
|  | ||||
|     LOG_TEE("%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT); | ||||
|     LOG_TEE("%s: built with %s for %s\n", __func__, BUILD_COMPILER, BUILD_TARGET); | ||||
|     LOG_TEE("%s: build = %d (%s)\n",      __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT); | ||||
|     LOG_TEE("%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); | ||||
|  | ||||
|     if (params.seed == LLAMA_DEFAULT_SEED) { | ||||
|         params.seed = time(NULL); | ||||
|   | ||||
| @@ -3,6 +3,3 @@ add_executable(${TARGET} parallel.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -1,8 +1,6 @@ | ||||
| // A basic application simulating a server with multiple clients. | ||||
| // The clients submit requests to the server and they are processed in parallel. | ||||
|  | ||||
| #include "build-info.h" | ||||
|  | ||||
| #include "common.h" | ||||
| #include "llama.h" | ||||
|  | ||||
|   | ||||
| @@ -3,6 +3,3 @@ add_executable(${TARGET} perplexity.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -1,4 +1,3 @@ | ||||
| #include "build-info.h" | ||||
| #include "common.h" | ||||
| #include "llama.h" | ||||
|  | ||||
|   | ||||
| @@ -1,6 +1,6 @@ | ||||
| set(TARGET quantize-stats) | ||||
| add_executable(${TARGET} quantize-stats.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_include_directories(${TARGET} PRIVATE ../../common) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
|   | ||||
| @@ -1,5 +1,4 @@ | ||||
| #define LLAMA_API_INTERNAL | ||||
| #include "build-info.h" | ||||
| #include "common.h" | ||||
| #include "ggml.h" | ||||
| #include "llama.h" | ||||
|   | ||||
| @@ -1,9 +1,6 @@ | ||||
| set(TARGET quantize) | ||||
| add_executable(${TARGET} quantize.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_include_directories(${TARGET} PRIVATE ../../common) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -1,4 +1,3 @@ | ||||
| #include "build-info.h" | ||||
| #include "common.h" | ||||
| #include "llama.h" | ||||
|  | ||||
|   | ||||
| @@ -3,6 +3,3 @@ add_executable(${TARGET} save-load-state.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -1,4 +1,3 @@ | ||||
| #include "build-info.h" | ||||
| #include "common.h" | ||||
| #include "llama.h" | ||||
|  | ||||
|   | ||||
| @@ -11,6 +11,3 @@ if (WIN32) | ||||
|     TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) | ||||
| endif() | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -1,6 +1,5 @@ | ||||
| #include "common.h" | ||||
| #include "llama.h" | ||||
| #include "build-info.h" | ||||
| #include "grammar-parser.h" | ||||
|  | ||||
| #include "../llava/clip.h" | ||||
| @@ -2264,8 +2263,8 @@ int main(int argc, char **argv) | ||||
|  | ||||
|     llama_backend_init(params.numa); | ||||
|  | ||||
|     LOG_INFO("build info", {{"build", BUILD_NUMBER}, | ||||
|                             {"commit", BUILD_COMMIT}}); | ||||
|     LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER}, | ||||
|                             {"commit", LLAMA_COMMIT}}); | ||||
|  | ||||
|     LOG_INFO("system info", { | ||||
|                                 {"n_threads", params.n_threads}, | ||||
|   | ||||
| @@ -3,6 +3,3 @@ add_executable(${TARGET} speculative.cpp) | ||||
| install(TARGETS ${TARGET} RUNTIME) | ||||
| target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) | ||||
| target_compile_features(${TARGET} PRIVATE cxx_std_11) | ||||
| if(TARGET BUILD_INFO) | ||||
|   add_dependencies(${TARGET} BUILD_INFO) | ||||
| endif() | ||||
|   | ||||
| @@ -1,5 +1,3 @@ | ||||
| #include "build-info.h" | ||||
|  | ||||
| #include "common.h" | ||||
| #include "llama.h" | ||||
|  | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 cebtenzzre