mirror of https://github.com/ggml-org/llama.cpp.git
ab14019821
* Support diffusion models: Add Dream 7B
* Move diffusion to examples
* Move stuff to examples. Add patch to not use kv-cache
* Address review comments
* Make sampling fast
* llama: remove diffusion functions
* Add basic timings + cleanup
* More cleanup
* Review comments: better formatting, use LOG instead of std::cerr, re-use batch, use ubatch instead of max_length
* fixup!
* Review: move everything to diffusion-cli for now
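The commits above describe the core idea of the example: a diffusion model such as Dream 7B does not generate autoregressively, so each denoising step re-decodes the whole fixed-length sequence and fills in the most confident masked positions, which is why the example patches out kv-cache use and re-uses a single batch sized from ubatch rather than from a maximum generation length. Below is a minimal, self-contained sketch of that decoding loop only; it is not the diffusion-cli implementation, the forward_all stub returns random logits so the program runs on its own, and VOCAB, MASK_ID, and the confidence schedule are assumptions made for illustration.

// Conceptual sketch of masked-diffusion decoding (not the diffusion-cli code):
// every step re-decodes the full sequence, then commits the most confident
// masked positions. The "model" is a random stub so the program is self-contained.
#include <algorithm>
#include <cstdio>
#include <random>
#include <vector>

static constexpr int VOCAB   = 32; // toy vocabulary size (assumption for illustration)
static constexpr int MASK_ID = 0;  // toy [MASK] token id (assumption for illustration)

// Stub standing in for one full forward pass over the whole sequence.
// A real diffusion example runs the model on every position each step,
// which is why a causal kv-cache is not reused between steps.
static std::vector<std::vector<float>> forward_all(const std::vector<int> & tokens, std::mt19937 & rng) {
    std::uniform_real_distribution<float> dist(0.0f, 1.0f);
    std::vector<std::vector<float>> logits(tokens.size(), std::vector<float>(VOCAB));
    for (auto & row : logits) {
        for (auto & v : row) {
            v = dist(rng);
        }
    }
    return logits;
}

int main() {
    const int n_prompt = 4;  // tokens fixed from the start
    const int n_gen    = 12; // masked positions to fill in
    const int n_steps  = 6;  // denoising steps

    std::mt19937 rng(42);

    // sequence = prompt tokens followed by [MASK] placeholders for the answer
    std::vector<int> tokens(n_prompt + n_gen, MASK_ID);
    for (int i = 0; i < n_prompt; ++i) {
        tokens[i] = 1 + i; // fake prompt ids
    }

    struct cand_t { float conf; int pos; int tok; };

    for (int step = 0; step < n_steps; ++step) {
        // one full, non-causal decode of the fixed-length sequence (re-used batch)
        auto logits = forward_all(tokens, rng);

        // score every still-masked position by its best non-mask logit
        std::vector<cand_t> cands;
        for (int i = n_prompt; i < (int) tokens.size(); ++i) {
            if (tokens[i] != MASK_ID) {
                continue;
            }
            auto best = std::max_element(logits[i].begin() + 1, logits[i].end());
            cands.push_back({ *best, i, (int) (best - logits[i].begin()) });
        }
        if (cands.empty()) {
            break;
        }

        // commit the most confident positions this step (simple linear schedule)
        const int n_commit = std::max(1, (int) cands.size() / (n_steps - step));
        std::partial_sort(cands.begin(), cands.begin() + n_commit, cands.end(),
                [](const cand_t & a, const cand_t & b) { return a.conf > b.conf; });
        for (int k = 0; k < n_commit; ++k) {
            tokens[cands[k].pos] = cands[k].tok;
        }
        std::printf("step %d: committed %d token(s)\n", step, n_commit);
    }
    return 0;
}

In the real example, the stub corresponds to decoding the full batch with the model at every step, which is what makes the kv-cache patch and the batch re-use mentioned in the commit message relevant.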
		
			
				
	
	
		
6 lines · 245 B · CMake
set(TARGET llama-diffusion-cli)
add_executable(${TARGET} diffusion-cli.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE llama common ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)
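This is the usual per-example CMake setup in llama.cpp: it defines a llama-diffusion-cli executable built from diffusion-cli.cpp, links it against the llama and common libraries plus the platform thread library, installs the binary, and requires C++17. Assuming a standard build of the repository, something like "cmake -B build" followed by "cmake --build build --target llama-diffusion-cli" should produce the binary; the build directory name and output location are assumptions, not part of this file.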