Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00
			
		
		
		
	 8c70a5ff25
			
		
	
	8c70a5ff25
	
	
	
		
			
* batched : add bench tool
* batched : minor fix table
* batched-bench : add readme + n_kv_max is now configurable
* batched-bench : init warm-up batch
* batched-bench : pass custom set of PP, TG and PL
* batched-bench : add mmq CLI arg
		
			
				
	
	
		
			6 lines
		
	
	
		
			239 B
		
	
	
	
		
			CMake
		
	
	
	
	
	
			
		
		
	
	
			6 lines
		
	
	
		
			239 B
		
	
	
	
		
			CMake
		
	
	
	
	
	
# Build the batched-bench example: a benchmark tool for batched decoding
# (configurable prompt-processing / text-generation / parallel-sequence sets).
set(TARGET batched-bench)

add_executable(${TARGET} batched-bench.cpp)

# Install the binary alongside the other llama.cpp tools.
install(TARGETS ${TARGET} RUNTIME)

# Threads::Threads carries both the compile and link flags for the platform's
# thread library (e.g. -pthread), unlike the legacy CMAKE_THREAD_LIBS_INIT
# variable which only provides the link flag.
find_package(Threads REQUIRED)
target_link_libraries(${TARGET} PRIVATE common llama Threads::Threads)

# The sources require C++11; PRIVATE since nothing consumes this executable.
target_compile_features(${TARGET} PRIVATE cxx_std_11)