mirror of https://github.com/ggml-org/llama.cpp.git

commit 381efbf480
* wip llava python bindings compatibility
* add external llava API
* add base64 in-prompt image support
* wip refactor image loading
* refactor image load out of llava init
* cleanup
* further cleanup; move llava-cli into its own file and rename
* move base64.hpp into common/
* collapse clip and llava libraries
* move llava into its own subdir
* wip
* fix bug where base64 string was not removed from the prompt
* get libllava to output in the right place
* expose llava methods in libllama.dylib
* cleanup memory usage around clip_image_*
* cleanup and refactor *again*
* update headerdoc
* build with cmake, not tested (WIP)
* Editorconfig
* Editorconfig
* Build with make
* Build with make
* Fix cyclical deps on Windows
* attempt to fix build on Windows
* attempt to fix build on Windows
* Upd TODOs
* attempt to fix build on Windows+CUDA
* Revert changes in cmake
* Fix according to review comments
* Support building as a shared library
* address review comments

---------

Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
Co-authored-by: Jared Van Bortel <jared@nomic.ai>
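Two of the items above ("collapse clip and llava libraries", "Support building as a shared library") are CMake-level changes, but the llava library's own build rules are not shown on this page. A minimal sketch of what building llava as a shared library can look like follows; the source file names (clip.cpp, llava.cpp) and the option wiring are assumptions for illustration, not copied from the commit:

    # Sketch only: source file names and option wiring are assumptions.
    option(BUILD_SHARED_LIBS "Build llava as a shared library" OFF)

    add_library(llava clip.cpp llava.cpp)  # SHARED when BUILD_SHARED_LIBS=ON, else STATIC
    set_target_properties(llava PROPERTIES POSITION_INDEPENDENT_CODE ON)
    target_include_directories(llava PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
    target_link_libraries(llava PRIVATE llama)
    target_compile_features(llava PRIVATE cxx_std_11)

Leaving the STATIC/SHARED keyword off add_library and deferring to BUILD_SHARED_LIBS is the conventional way to let one target serve both build modes.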
		
			
				
	
	
		
examples/server/CMakeLists.txt · 14 lines · 542 B · CMake
set(TARGET server)
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
add_executable(${TARGET} server.cpp json.hpp httplib.h)
install(TARGETS ${TARGET} RUNTIME)
target_compile_definitions(${TARGET} PRIVATE
    SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
)
target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
if (WIN32)
    TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
endif()
target_compile_features(${TARGET} PRIVATE cxx_std_11)
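The $<BOOL:${LLAMA_SERVER_VERBOSE}> generator expression normalizes the option to 0 or 1, so SERVER_VERBOSE is always defined to a value the preprocessor can test. Since the point of this commit is exposing llava to downstream projects, a consumer's CMakeLists.txt might look like the sketch below; the project name, paths, and build-tree layout are illustrative assumptions, not part of this commit:

    # Sketch of a downstream consumer; all paths and names here are illustrative.
    cmake_minimum_required(VERSION 3.18)
    project(llava_consumer CXX)

    # Point this at your llama.cpp checkout (assumes an out-of-tree build in build/).
    set(LLAMA_CPP_DIR "${CMAKE_SOURCE_DIR}/../llama.cpp")

    # Locate the shared libraries produced by the llama.cpp build.
    find_library(LLAVA_LIB llava HINTS "${LLAMA_CPP_DIR}/build" REQUIRED)
    find_library(LLAMA_LIB llama HINTS "${LLAMA_CPP_DIR}/build" REQUIRED)

    add_executable(llava_consumer main.cpp)
    target_include_directories(llava_consumer PRIVATE "${LLAMA_CPP_DIR}")
    target_link_libraries(llava_consumer PRIVATE "${LLAVA_LIB}" "${LLAMA_LIB}")
    target_compile_features(llava_consumer PRIVATE cxx_std_11)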