examples: cache hf model when --model not provided (#7353)
@@ -281,6 +281,7 @@ bool llama_should_add_bos_token(const llama_model * model);
 //
 
 bool create_directory_with_parents(const std::string & path);
+std::string get_cache_directory();
 void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data);
 void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data);
 void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data);
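The new declaration pairs with a definition in common.cpp that resolves a per-user cache location for downloaded Hugging Face models. The following is a minimal sketch of the idea only, not a copy of the commit's code: the LLAMA_CACHE override and the per-platform defaults (XDG_CACHE_HOME / ~/.cache on Linux, ~/Library/Caches on macOS, %APPDATA% on Windows) are assumptions about the behavior, and get_cache_directory_sketch is a hypothetical stand-in name.

#include <cstdio>
#include <cstdlib>
#include <string>

// Sketch: resolve a cache directory for llama.cpp model downloads.
// Assumption: an explicit LLAMA_CACHE environment variable overrides
// the platform default; otherwise fall back to the usual cache roots.
std::string get_cache_directory_sketch() {
    if (const char * env = std::getenv("LLAMA_CACHE")) {
        std::string dir = env;
        if (!dir.empty() && dir.back() != '/') {
            dir += '/'; // normalize to a trailing separator
        }
        return dir;
    }
    std::string dir;
#if defined(__linux__)
    // XDG base-directory convention, falling back to ~/.cache.
    if (const char * xdg = std::getenv("XDG_CACHE_HOME")) {
        dir = std::string(xdg) + "/";
    } else if (const char * home = std::getenv("HOME")) {
        dir = std::string(home) + "/.cache/";
    }
#elif defined(__APPLE__)
    if (const char * home = std::getenv("HOME")) {
        dir = std::string(home) + "/Library/Caches/";
    }
#elif defined(_WIN32)
    if (const char * appdata = std::getenv("APPDATA")) {
        dir = std::string(appdata) + "\\";
    }
#endif
    return dir + "llama.cpp/";
}

int main() {
    std::printf("cache dir: %s\n", get_cache_directory_sketch().c_str());
}

With a helper like this, examples that accept --hf-repo/--hf-file can download into the cache directory when --model is not provided, which is the behavior the commit title describes; presumably the existing create_directory_with_parents() is used to ensure the directory exists before writing to it.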