mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-02 09:12:03 +00:00 
			
		
		
		
	convert-new.py : output gguf (#2635)
* convert-new.py : output gguf (WIP)
* convert-new.py : add gguf key-value pairs
* llama : add hparams.ctx_train + no longer print ftype
* convert-new.py : minor fixes
* convert-new.py : vocab-only option should work now
* llama : fix tokenizer to use llama_char_to_byte
* tests : add new ggml-vocab-llama.gguf
* convert-new.py : tensor name mapping
* convert-new.py : add map for skipping tensor serialization
* convert-new.py : convert script now works
* gguf.py : pick some of the refactoring from #2644
* convert-new.py : minor fixes
This commit is contained in:
		@@ -89,6 +89,8 @@ int main(int argc, char **argv) {
 | 
			
		||||
        return 2;
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    bool success = true;
 | 
			
		||||
 | 
			
		||||
    for (const auto & test_kv : k_tests()) {
 | 
			
		||||
        std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, true);
 | 
			
		||||
        fprintf(stderr, "%s : '%s' tokenized to '%s'\n",
 | 
			
		||||
@@ -103,7 +105,8 @@ int main(int argc, char **argv) {
 | 
			
		||||
        }
 | 
			
		||||
 | 
			
		||||
        if (!correct) {
 | 
			
		||||
            fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
 | 
			
		||||
            fprintf(stderr, "%s : failed test:    '%s'\n", __func__, test_kv.first.c_str());
 | 
			
		||||
            fprintf(stderr, "%s : detokenized to: '%s'\n", __func__, unescape_whitespace(ctx, test_kv.second).c_str());
 | 
			
		||||
            fprintf(stderr, "%s : expected tokens: ", __func__);
 | 
			
		||||
            for (const auto & t : test_kv.second) {
 | 
			
		||||
                fprintf(stderr, "%6d, ", t);
 | 
			
		||||
@@ -115,9 +118,7 @@ int main(int argc, char **argv) {
 | 
			
		||||
            }
 | 
			
		||||
            fprintf(stderr, "\n");
 | 
			
		||||
 | 
			
		||||
            llama_free_model(model);
 | 
			
		||||
            llama_free(ctx);
 | 
			
		||||
            return 3;
 | 
			
		||||
            success = false;
 | 
			
		||||
        }
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
@@ -126,5 +127,5 @@ int main(int argc, char **argv) {
 | 
			
		||||
 | 
			
		||||
    llama_backend_free();
 | 
			
		||||
 | 
			
		||||
    return 0;
 | 
			
		||||
    return success ? 0 : 3;
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
		Reference in New Issue
	
	Block a user