Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-11-03 09:22:01 +00:00
* server: add llama_server_queue struct
* server: add llama_server_response_event
* server: add comments
* server: move all mutexes away from server.cpp
* server: correct multitask response
* server: only add back deferred tasks when one slot is available
* server: fix a race condition caused by "request_completion"
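A minimal sketch of the refactor's core idea follows, assuming the queue owns its own mutex and condition variable so that server.cpp never has to lock anything directly. Only the names llama_server_queue and llama_server_response_event come from the commit message; every member, method, and the task_server payload type below are illustrative assumptions, not the actual llama.cpp API.

#include <condition_variable>
#include <deque>
#include <mutex>
#include <vector>

// Hypothetical task payload; the real server attaches prompt data, ids, etc.
struct task_server { int id; };

struct llama_server_queue {
    std::mutex mutex;                   // the queue owns all of its locking
    std::condition_variable cv;
    std::deque<task_server>  tasks;     // tasks ready to be picked up by a slot
    std::vector<task_server> deferred;  // tasks parked until a slot frees up

    // Producer side: HTTP handlers enqueue work without holding any
    // server-wide lock.
    void post(task_server task) {
        std::unique_lock<std::mutex> lock(mutex);
        tasks.push_back(std::move(task));
        cv.notify_one();
    }

    // Park a task when no slot is available right now.
    void defer(task_server task) {
        std::unique_lock<std::mutex> lock(mutex);
        deferred.push_back(std::move(task));
    }

    // Called when a slot becomes free: only then are deferred tasks re-queued.
    void notify_slot_changed() {
        std::unique_lock<std::mutex> lock(mutex);
        for (auto & task : deferred) {
            tasks.push_back(std::move(task));
        }
        deferred.clear();
        cv.notify_one();
    }

    // Consumer side: the inference loop blocks until a task arrives.
    task_server receive() {
        std::unique_lock<std::mutex> lock(mutex);
        cv.wait(lock, [&] { return !tasks.empty(); });
        task_server task = std::move(tasks.front());
        tasks.pop_front();
        return task;
    }
};

A llama_server_response_event would pair with this as the return channel: the HTTP handler waits on it for its result (including an aggregated multitask response) instead of polling shared state. Centralizing every mutex inside these two structs is what lets server.cpp itself stay free of locking, closing races such as the one around "request_completion".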
CMakeLists.txt · 14 lines · 554 B · CMake
set(TARGET server)
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
add_executable(${TARGET} server.cpp oai.hpp utils.hpp json.hpp httplib.h)
install(TARGETS ${TARGET} RUNTIME)
target_compile_definitions(${TARGET} PRIVATE
    SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
)
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
if (WIN32)
    TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
endif()
target_compile_features(${TARGET} PRIVATE cxx_std_11)
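Note how the verbose flag is wired through: $<BOOL:${LLAMA_SERVER_VERBOSE}> is a CMake generator expression that collapses the ON/OFF cache option to a literal 1 or 0, so the C++ side sees a plain numeric SERVER_VERBOSE macro. A minimal sketch of how such a definition is typically consumed follows; the LOG_VERBOSE macro name and the fallback default are assumptions for illustration, not necessarily what server.cpp does.

#include <cstdio>

// Fall back to verbose builds if the build system did not define the macro.
#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

#if SERVER_VERBOSE != 1
// Verbose logging compiles away to nothing in non-verbose builds.
#define LOG_VERBOSE(MSG) do { } while (0)
#else
#define LOG_VERBOSE(MSG) std::fprintf(stderr, "VERBOSE: %s\n", MSG)
#endif

int main() {
    // Becomes a no-op when the project is configured with -DLLAMA_SERVER_VERBOSE=OFF.
    LOG_VERBOSE("slot 0: processing task");
    return 0;
}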