mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-04 09:32:00 +00:00 
			
		
		
		
	* llama/ggml: add LLM training support
	* more compact progress bar
	* llama_save_model_to_file
	* llama_opt_param_filter
	* ggml_graph_dup force_grads
	* refactor ggml_opt, fix test-opt
	* remove logits_all
	* refactor CUDA implementation for ACC
	* reset graph at beginning of opt period
		
			
				
	
	
		
			44 lines
		
	
	
		
			985 B
		
	
	
	
		
			CMake
		
	
	
	
	
	
			
		
		
	
	
			44 lines
		
	
	
		
			985 B
		
	
	
	
		
			CMake
		
	
	
	
	
	
# dependencies

find_package(Threads REQUIRED)

# third-party

# ...

# flags

llama_add_compile_flags()

# examples

if (EMSCRIPTEN)
else()
    add_subdirectory(batched)
    add_subdirectory(embedding)
    add_subdirectory(eval-callback)

    add_subdirectory(gguf-hash)
    add_subdirectory(gguf)
    add_subdirectory(gritlm)
    add_subdirectory(lookahead)
    add_subdirectory(lookup)
    add_subdirectory(parallel)
    add_subdirectory(passkey)
    add_subdirectory(retrieval)
    add_subdirectory(save-load-state)
    add_subdirectory(simple)
    add_subdirectory(simple-chat)
    add_subdirectory(speculative)
    add_subdirectory(speculative-simple)
    add_subdirectory(gen-docs)
    add_subdirectory(training)
    if (NOT GGML_BACKEND_DL)
        add_subdirectory(convert-llama2c-to-ggml)
        # these examples use the backends directly and cannot be built with dynamic loading
        if (GGML_SYCL)
            add_subdirectory(sycl)
        endif()
    endif()
endif()