mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-03 09:22:01 +00:00 
			
		
		
		
* Be more strict about converting float to double
* Test equivalence of round, SILU implementations.
  The test module is commented out in CMakeLists.txt because the tests may
  take a long time, depending on how much the compiler optimizes.
* Fix softmax in perplexity.cpp
* all : prefer float over double where appropriate
* perplexity : add <cmath>
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
		
			
				
	
	
		
			11 lines
		
	
	
		
			453 B
		
	
	
	
		
			CMake
		
	
	
	
	
	
			
		
		
	
	
			11 lines
		
	
	
		
			453 B
		
	
	
	
		
			CMake
		
	
	
	
	
	
# llama_add_test(<source> [arg...])
#
# Registers one test executable built from a single source file:
#   - the target name (and test name) is the source filename without
#     its extension (NAME_WE)
#   - the executable links PRIVATE against the `llama` library target
#   - any extra arguments after <source> are forwarded verbatim to the
#     test's command line (see the tokenizer test below, which receives
#     a vocab file path)
#
# Example:
#   llama_add_test(test-quantize.c)
function(llama_add_test source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source})
    target_link_libraries(${TEST_TARGET} PRIVATE llama)
    # $<TARGET_FILE:...> resolves to the built binary's path at generate
    # time, so this works with multi-config generators as well.
    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
endfunction()
 | 
						|
 | 
						|
# llama_add_test(test-double-float.c) # SLOW
 | 
						|
llama_add_test(test-quantize.c)
 | 
						|
llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
 |