mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-04 09:32:00 +00:00)

llama : Add test for model load cancellation
tests/CMakeLists.txt:

@@ -50,6 +50,7 @@ llama_build_and_test_executable(test-grad0.cpp)
 llama_build_and_test_executable(test-backend-ops.cpp)
 
 llama_build_and_test_executable(test-rope.cpp)
+llama_build_and_test_executable(test-model-load-cancel.cpp)
 
 # dummy executable - not installed
 get_filename_component(TEST_TARGET test-c.c NAME_WE)
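Registered through llama_build_and_test_executable, the new test is built with the rest of the suite and should be runnable through ctest (for example, `ctest -R test-model-load-cancel` from the build directory), assuming a model file exists at the path the test hardcodes below.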
							
								
								
									
tests/test-model-load-cancel.cpp (new file, 17 lines):

@@ -0,0 +1,17 @@
+#include "llama.h"
+
+#include <cstdlib>
+#include <tuple>
+
+int main(void) {
+    llama_backend_init(false);
+    auto params = llama_model_params{};
+    params.use_mmap = false;
+    params.progress_callback = [](float progress, void * ctx){
+        std::ignore = ctx;
+        return progress > 0.50;
+    };
+    auto * model = llama_load_model_from_file("../models/7B/ggml-model-f16.gguf", params);
+    llama_backend_free();
+    return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
+}
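The test drives the cancellation path end to end: the progress callback returns false while progress is at or below 0.50, the loader aborts, llama_load_model_from_file returns nullptr, and the test exits with EXIT_SUCCESS. Below is a minimal sketch (not part of the commit) of how an application might use the same hook to cancel a load from another thread, passing an atomic flag through progress_callback_user_data. It assumes the llama.h API as of this change (a bool-returning progress callback, llama_backend_init still taking a NUMA flag); the model path and the 100 ms timer are placeholders.

#include "llama.h"

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>
#include <tuple>

int main(void) {
    llama_backend_init(false); // NUMA disabled, as in the test above

    // Flag that another thread (a UI handler, a timeout, ...) can flip to
    // request cancellation while the model is still loading.
    std::atomic<bool> cancel_requested{false};

    auto params = llama_model_default_params();
    params.use_mmap = false; // as in the test: load tensor data eagerly so progress is reported
    params.progress_callback_user_data = &cancel_requested;
    params.progress_callback = [](float progress, void * ctx) {
        std::ignore = progress;
        auto * cancel = static_cast<std::atomic<bool> *>(ctx);
        // Returning true lets the load continue; returning false aborts it,
        // which makes llama_load_model_from_file return nullptr.
        return !cancel->load();
    };

    // Simulate an external cancellation request shortly after the load starts.
    std::thread canceller([&cancel_requested] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        cancel_requested.store(true);
    });

    // Placeholder path: any sufficiently large GGUF model file works.
    auto * model = llama_load_model_from_file("models/7B/ggml-model-f16.gguf", params);
    canceller.join();

    if (model == nullptr) {
        fprintf(stderr, "model load was cancelled (or failed)\n");
    } else {
        llama_free_model(model);
    }

    llama_backend_free();
    return 0;
}

Since cancellation is observed only when the loader invokes the callback, the request takes effect at the next progress update rather than immediately.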