Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-11-03 09:22:01 +00:00
* scripts : add lib.sh and lib_test.sh
* scripts : stub out new ci-run.sh script
* scripts : switch to PascalCase for functions. This looks a little odd at first, but it is very useful as a convention for knowing whether a command is part of our code or a builtin.
* scripts : add some fancy conversion from snake_case to PascalCase
* Add venv to ci/run.sh
* Revert scripts work
* scripts : add wrapper script for local use of ci/run.sh
* Simplify .gitignore for tests, clang-tidy fixes
* Label all ctest tests
* ci : ctest uses -L main
* Attempt at writing ctest_with_model
* Update test-model-load-cancel
* ci : add ctest_with_model for debug and release (ggml-ci)
* Fix gg_get_model function (ggml-ci)
* Got stuck on CMake
* Add get_model.cpp to tests/CMakeLists.txt (ggml-ci)
* Fix README.md output for ctest_with_model (ggml-ci)
* workflows : use `-L main` for all ctest (ggml-ci)
* Fixes
* Rename GG_RUN_CTEST_MODELFILE => LLAMACPP_TESTMODELFILE
* Always show a warning rather than failing if the model file variable is not set
* scripts : update usage text for ci-run.sh
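The last few bullets describe how the model tests are gated: the model path comes from the LLAMACPP_TESTMODELFILE environment variable (renamed from GG_RUN_CTEST_MODELFILE), and a missing variable produces a warning instead of a failure. The real lookup lives in tests/get-model.cpp, which is not shown on this page, so the following is only a minimal sketch of that behaviour, reconstructed from the commit bullets:

// Hypothetical sketch of get_model_or_exit() based on the commit bullets;
// the actual implementation is in tests/get-model.cpp (not shown here).
#include <cstdio>
#include <cstdlib>

char * get_model_or_exit(int argc, char ** argv) {
    // an explicit path on the command line wins
    if (argc > 1) {
        return argv[1];
    }
    // otherwise use the variable from the rename bullet:
    // GG_RUN_CTEST_MODELFILE => LLAMACPP_TESTMODELFILE
    char * model_path = getenv("LLAMACPP_TESTMODELFILE");
    if (model_path == nullptr || model_path[0] == '\0') {
        // warn rather than fail, so a plain `ctest -L main` run stays green
        // on machines that have not downloaded a test model
        fprintf(stderr, "warning: LLAMACPP_TESTMODELFILE is not set, skipping model test\n");
        exit(EXIT_SUCCESS);
    }
    return model_path;
}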
25 lines · 724 B · C++
// ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763

#include <cstdio>
#include <string>
#include <thread>

#include "llama.h"
#include "get-model.h"

// This creates a new context inside a pthread and then tries to exit cleanly.
int main(int argc, char ** argv) {
    auto * model_path = get_model_or_exit(argc, argv);

    std::thread([&model_path]() {
        llama_backend_init(false);
        auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
        auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
    }).join();

    return 0;
}
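The commit also touches test-model-load-cancel, which exercises the other half of this machinery: aborting a load partway through. Cancellation goes through the progress_callback field of llama_model_params; per llama.h of this era, the callback returns true to continue loading and false to abort, in which case llama_load_model_from_file returns nullptr. A rough sketch of such a test, assuming that callback contract (the 0.5 threshold and the use_mmap tweak are illustrative choices, not the actual file contents):

// Sketch of a load-cancel test; assumes the bool-returning
// llama_progress_callback, where returning false aborts the load.
#include <cstdlib>

#include "llama.h"
#include "get-model.h"

int main(int argc, char ** argv) {
    auto * model_path = get_model_or_exit(argc, argv);

    llama_backend_init(false);

    auto params = llama_model_default_params();
    params.use_mmap = false; // assumption: skip mmap so progress advances gradually
    params.progress_callback = [](float progress, void * /*user_data*/) {
        // keep loading until halfway, then cancel (threshold is illustrative)
        return progress < 0.50f;
    };

    auto * model = llama_load_model_from_file(model_path, params);
    llama_backend_free();

    // a cancelled load must yield nullptr rather than a partially loaded model
    return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
}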