mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-03 09:22:01 +00:00 
			
		
		
		
	* llama: add llama_chat_apply_template * test-chat-template: remove redundant vector * chat_template: do not use std::string for buffer * add clarification for llama_chat_apply_template * llama_chat_apply_template: add zephyr template * llama_chat_apply_template: correct docs * llama_chat_apply_template: use term "chat" everywhere * llama_chat_apply_template: change variable name to "tmpl"
		
			
				
	
	
		
			69 lines
		
	
	
		
			4.0 KiB
		
	
	
	
		
			CMake
		
	
	
	
	
	
			
		
		
	
	
			69 lines
		
	
	
		
			4.0 KiB
		
	
	
	
		
			CMake
		
	
	
	
	
	
# Build one test executable from a single source file (plus the shared
# get-model.cpp helper), link it against the `common` library, and mark
# its runtime artifact for installation. The target name is the source
# file name without extension. Registers no CTest entry — pair with
# llama_test_executable() to run it.
function(llama_build_executable source)
    get_filename_component(target ${source} NAME_WE)
    add_executable(${target} ${source} get-model.cpp)
    install(TARGETS ${target} RUNTIME)
    target_link_libraries(${target} PRIVATE common)
endfunction()
# Register a CTest case called `name` that runs the executable previously
# built from `source` (see llama_build_executable). Any additional
# arguments are forwarded verbatim to the test command line. Every test
# registered through this helper carries the "main" label.
function(llama_test_executable name source)
    get_filename_component(test_target ${source} NAME_WE)
    add_test(NAME ${name} COMMAND $<TARGET_FILE:${test_target}> ${ARGN})
    set_property(TEST ${name} PROPERTY LABELS "main")
endfunction()
# Build a test executable from `source` and register it as a CTest case
# with the default "main" label.
# Fix: forward ${ARGN} so extra test-command arguments passed by callers
# reach add_test instead of being silently dropped (backward compatible —
# existing callers pass no extra arguments).
function(llama_build_and_test_executable source)
    llama_build_and_test_executable_with_label(${source} "main" ${ARGN})
endfunction()
# Build a test executable from `source`, register it as a CTest case named
# after the source file (without extension), and tag it with `label`
# (e.g. "main" or "model"). Extra arguments are forwarded to the test
# command line.
# Fix: delegate the build/install/link steps to llama_build_executable()
# instead of duplicating its body here, so the two stay in sync.
function(llama_build_and_test_executable_with_label source label)
    llama_build_executable(${source})
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
    set_property(TEST ${TEST_TARGET} PROPERTY LABELS ${label})
endfunction()
# ---------------------------------------------------------------------------
# self-contained tests — built and registered under the "main" label
# ---------------------------------------------------------------------------
# llama_build_and_test_executable(test-double-float.cpp) # SLOW
llama_build_and_test_executable(test-quantize-fns.cpp)
llama_build_and_test_executable(test-quantize-perf.cpp)
llama_build_and_test_executable(test-sampling.cpp)
llama_build_and_test_executable(test-chat-template.cpp)

# ---------------------------------------------------------------------------
# tokenizer tests — one executable per tokenizer family, run once per vocab
# ---------------------------------------------------------------------------
llama_build_executable(test-tokenizer-0-llama.cpp)
llama_test_executable (test-tokenizer-0-llama test-tokenizer-0-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)

llama_build_executable(test-tokenizer-0-falcon.cpp)
llama_test_executable (test-tokenizer-0-falcon test-tokenizer-0-falcon.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)

llama_build_executable(test-tokenizer-1-llama.cpp)
llama_test_executable (test-tokenizer-1-llama    test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
llama_test_executable (test-tokenizer-1-baichuan test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)

# the BPE tokenizer executable is shared by every BPE-based vocab below
llama_build_executable(test-tokenizer-1-bpe.cpp)
llama_test_executable (test-tokenizer-1-falcon           test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
llama_test_executable (test-tokenizer-1-aquila           test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
llama_test_executable (test-tokenizer-1-mpt              test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
llama_test_executable (test-tokenizer-1-stablelm-3b-4e1t test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-stablelm-3b-4e1t.gguf)
llama_test_executable (test-tokenizer-1-gpt-neox         test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
llama_test_executable (test-tokenizer-1-refact           test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
llama_test_executable (test-tokenizer-1-starcoder        test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
llama_test_executable (test-tokenizer-1-gpt2             test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt2.gguf)
# llama_test_executable (test-tokenizer-1-bloom test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bloom.gguf) # BIG

# ---------------------------------------------------------------------------
# grammar, training, and backend tests
# ---------------------------------------------------------------------------
llama_build_and_test_executable(test-grammar-parser.cpp)
llama_build_and_test_executable(test-llama-grammar.cpp)
llama_build_and_test_executable(test-grad0.cpp)
# llama_build_and_test_executable(test-opt.cpp) # SLOW
llama_build_and_test_executable(test-backend-ops.cpp)

llama_build_and_test_executable(test-rope.cpp)

# tests tagged "model" require model files and are filtered by label
llama_build_and_test_executable_with_label(test-model-load-cancel.cpp "model")
llama_build_and_test_executable_with_label(test-autorelease.cpp "model")

# dummy executable - not installed
# (only checks that the public C header compiles/links from plain C)
get_filename_component(TEST_TARGET test-c.c NAME_WE)
add_executable(${TEST_TARGET} test-c.c)
target_link_libraries(${TEST_TARGET} PRIVATE llama)