Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
* add interface for float input
* fixed inpL shape and type
* add examples of input floats
* add test example for embd input
* fixed sampling
* add free for context
* fixed end condition for generating
* add examples for llava.py
* add README for llava.py
* add README for llava.py
* add example of PandaGPT
* refactor the interface and fixed the styles
* add cmake build for embd-input
* add cmake build for embd-input
* Add MiniGPT-4 example
* change the order of the args of llama_eval_internal
* fix ci error
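The "add cmake build for embd-input" entries above refer to wiring the new example into the examples build. A minimal sketch of how such a directory is typically registered from the parent examples/CMakeLists.txt (this excerpt is an assumption for illustration, not quoted from the repository); the example's own CMakeLists.txt follows below:

# Hypothetical excerpt from examples/CMakeLists.txt: process the
# embd-input example directory so the targets defined below get configured.
add_subdirectory(embd-input)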
		
			
				
	
	
		
CMakeLists.txt · 16 lines · 572 B · CMake
# embdinput: library exposing the embedding-input interface around llama
set(TARGET embdinput)
add_library(${TARGET} embd-input-lib.cpp embd-input.h)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
if(TARGET BUILD_INFO)
  add_dependencies(${TARGET} BUILD_INFO)
endif()

# embd-input-test: example executable linking against the embdinput library
set(TARGET embd-input-test)
add_executable(${TARGET} embd-input-test.cpp)
target_link_libraries(${TARGET} PRIVATE common llama embdinput ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
if(TARGET BUILD_INFO)
  add_dependencies(${TARGET} BUILD_INFO)
endif()
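Any further consumer of the embedding-input interface would follow the same pattern as embd-input-test: link against embdinput (plus common, llama, and the thread library) and require C++11. A minimal sketch with a hypothetical target name and source file, not taken from the repository:

# Hypothetical additional example reusing the embdinput library
set(TARGET embd-input-demo)                      # illustrative target name
add_executable(${TARGET} embd-input-demo.cpp)    # hypothetical source file
target_link_libraries(${TARGET} PRIVATE common llama embdinput ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)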