	correct working directory for all builds
..and change cache file name as per suggestion.
@@ -99,7 +99,7 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact            ARGS ${CMAKE
 llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder         ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
 
 if (LLAMA_CURL)
-    llama_build_and_test(test-tokenizers-remote.cpp WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+    llama_build_and_test(test-tokenizers-remote.cpp WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
 endif()
 
 if (LLAMA_LLGUIDANCE)
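The WORKING_DIRECTORY change stops hard-coding where the test binary lands. A minimal standalone sketch of why referencing ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} is the more robust choice, assuming the top-level project routes its executables through that variable (llama.cpp's root CMakeLists.txt appears to do so); the remote-tool target and main.cpp below are hypothetical:

    cmake_minimum_required(VERSION 3.14)
    project(wd-demo LANGUAGES CXX)
    enable_testing()

    # Executables are collected under bin/ via the variable; if this location ever
    # changes, anything referencing the variable follows automatically, while a
    # hard-coded ${CMAKE_BINARY_DIR}/bin would silently point at the wrong place.
    set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

    add_executable(remote-tool main.cpp)                 # hypothetical test binary
    add_test(NAME remote-tool COMMAND remote-tool)
    set_tests_properties(remote-tool PROPERTIES
        WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})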
@@ -47,7 +47,7 @@ static json get_hf_repo_dir(const std::string & hf_repo_with_branch, bool recurs
     }
 
     // we use "=" to avoid clashing with other component, while still being allowed on windows
-    std::string cached_response_fname = "tree=" + hf_repo + "/" + repo_path + "=" + branch + ".json";
+    std::string cached_response_fname = "test_vocab=" + hf_repo + "/" + repo_path + "=" + branch + ".json";
     string_replace_all(cached_response_fname, "/", "_");
     std::string cached_response_path = fs_get_cache_file(cached_response_fname);
 
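For illustration, a minimal sketch of the filename the renamed prefix produces; the replace_all helper below stands in for llama.cpp's string_replace_all, and the repo/branch values are hypothetical:

    #include <iostream>
    #include <string>

    // Stand-in for the project's string_replace_all: replace every `from` with `to`.
    static void replace_all(std::string & s, const std::string & from, const std::string & to) {
        for (size_t pos = 0; (pos = s.find(from, pos)) != std::string::npos; pos += to.size()) {
            s.replace(pos, from.size(), to);
        }
    }

    int main() {
        // hypothetical inputs
        std::string hf_repo   = "ggml-org/models";
        std::string repo_path = "";
        std::string branch    = "main";

        // same construction as the patched line: '=' separates the components,
        // '/' is sanitized afterwards because it cannot appear in a file name
        std::string fname = "test_vocab=" + hf_repo + "/" + repo_path + "=" + branch + ".json";
        replace_all(fname, "/", "_");

        std::cout << fname << '\n';   // prints: test_vocab=ggml-org_models_=main.json
    }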