	whisper : tokenizer fix + re-enable tokenizer test for LLaMa (#3096)
* Fix for #2721
* Re-enable tokenizer test for LLaMa
* Add `console.cpp` dependency
* Fix dependency on `common`
* Correct the earlier, wrong fix
* Make console usage platform specific; address compiler warnings
* Adapt the Makefile
* Remove trailing whitespace
* Adapt the remaining parts of the Makefile
* Fix a typo
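The core tokenizer change in the llama.cpp hunk below replaces a hard assertion with a clamp: when the last bytes of the input form a truncated UTF-8 sequence, the symbol length is limited to the bytes that actually remain instead of asserting and reading past the end of the string. A minimal, self-contained sketch of that idea follows (not the library code; utf8_len is reproduced here only for illustration):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// Length of a UTF-8 sequence, derived from its first byte (same first-byte lookup as in llama.cpp).
static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    return lookup[static_cast<uint8_t>(src) >> 4];
}

int main() {
    // "hi" followed by the first byte of a three-byte sequence, truncated on purpose.
    const std::string text = "hi\xe2";
    std::vector<std::string> symbols;

    size_t offs = 0;
    while (offs < text.size()) {
        const size_t len = utf8_len(text[offs]);
        // The fix: never take more bytes than remain in the input.
        const size_t n = std::min(len, text.size() - offs);
        symbols.emplace_back(text, offs, n);
        offs += n;
    }

    for (const auto & s : symbols) {
        std::printf("symbol of %zu byte(s)\n", s.size());
    }
    return 0;
}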
Makefile (6 changed lines)
							| @@ -2,7 +2,7 @@ | ||||
| BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam-search speculative tests/test-c.o | ||||
|  | ||||
| # Binaries only useful for tests | ||||
| TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1 | ||||
| TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1-llama | ||||
|  | ||||
| # Code coverage output files | ||||
| COV_TARGETS = *.gcno tests/*.gcno *.gcda tests/*.gcda *.gcov tests/*.gcov lcov-report gcovr-report | ||||
| @@ -49,7 +49,7 @@ test: $(TEST_TARGETS) | ||||
| 			./$$test_target $(CURDIR)/models/ggml-vocab-llama.gguf; \ | ||||
| 		elif [ "$$test_target" = "tests/test-tokenizer-0-falcon" ]; then \ | ||||
| 			continue; \ | ||||
| 		elif [ "$$test_target" = "tests/test-tokenizer-1" ]; then \ | ||||
| 		elif [ "$$test_target" = "tests/test-tokenizer-1-llama" ]; then \ | ||||
| 			continue; \ | ||||
| 		else \ | ||||
| 			echo "Running test $$test_target..."; \ | ||||
| @@ -605,7 +605,7 @@ tests/test-tokenizer-0-falcon: tests/test-tokenizer-0-falcon.cpp build-info.h gg | ||||
| tests/test-tokenizer-0-llama: tests/test-tokenizer-0-llama.cpp build-info.h ggml.o llama.o common.o $(OBJS) | ||||
| 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) | ||||
|  | ||||
| tests/test-tokenizer-1: tests/test-tokenizer-1.cpp build-info.h ggml.o llama.o common.o $(OBJS) | ||||
| tests/test-tokenizer-1-llama: tests/test-tokenizer-1-llama.cpp build-info.h ggml.o llama.o common.o $(OBJS) | ||||
| 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) | ||||
|  | ||||
| tests/test-c.o: tests/test-c.c llama.h | ||||
llama.cpp
| @@ -3121,10 +3121,9 @@ struct llm_tokenizer_spm { | ||||
|         while (offs < text.size()) { | ||||
|             llm_symbol sym; | ||||
|             size_t len = utf8_len(text[offs]); | ||||
|             GGML_ASSERT(offs + len <= text.size()); | ||||
|             sym.text = text.c_str() + offs; | ||||
|             sym.n = len; | ||||
|             offs += len; | ||||
|             sym.n = std::min(len, text.size() - offs); | ||||
|             offs += sym.n; | ||||
|             sym.prev = index - 1; | ||||
|             sym.next = offs == text.size() ? -1 : index + 1; | ||||
|             index++; | ||||
| @@ -6218,7 +6217,7 @@ int llama_tokenize_with_model( | ||||
|     auto res = llama_tokenize_internal(model->vocab, text, add_bos); | ||||
|  | ||||
|     if (n_max_tokens < (int) res.size()) { | ||||
|         LLAMA_LOG_ERROR("%s: too many tokens\n", __func__); | ||||
|         // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__); | ||||
|         return -((int) res.size()); | ||||
|     } | ||||
|  | ||||
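In the llama.cpp hunk above, the "too many tokens" error log is commented out while the function keeps returning the negative of the required token count. Presumably, callers that probe with a deliberately small (or empty) buffer to learn how much space they need would otherwise trigger the log on every call. A hedged sketch of that calling convention, using a stand-in tokenizer rather than the real llama_tokenize_with_model signature:

#include <cstdio>
#include <string>
#include <vector>

// Stand-in for a tokenizer with the same convention as the hunk above:
// write up to n_max token ids into `out`, or return -(required count) if the
// buffer is too small. (Hypothetical helper, not the llama.cpp API.)
static int toy_tokenize(const std::string & text, int * out, int n_max) {
    // Pretend every character becomes one token.
    const int needed = (int) text.size();
    if (n_max < needed) {
        return -needed; // the caller learns how much space is required
    }
    for (int i = 0; i < needed; ++i) {
        out[i] = (int) text[i];
    }
    return needed;
}

int main() {
    const std::string text = "hello tokenizer";

    // First pass: probe with an empty buffer. A negative result is expected here,
    // which is why it is not treated as an error worth logging by the tokenizer.
    std::vector<int> tokens;
    int n = toy_tokenize(text, tokens.data(), (int) tokens.size());
    if (n < 0) {
        tokens.resize(-n);
        n = toy_tokenize(text, tokens.data(), (int) tokens.size());
    }

    std::printf("tokenized into %d tokens\n", n);
    return 0;
}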
tests/CMakeLists.txt
| @@ -29,9 +29,8 @@ llama_build_executable(test-tokenizer-0-llama.cpp) | ||||
| llama_test_executable (test-tokenizer-0-llama test-tokenizer-0-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf) | ||||
| llama_build_executable(test-tokenizer-0-falcon.cpp) | ||||
| #llama_test_executable (test-tokenizer-0-falcon test-tokenizer-0-falcon.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf) | ||||
| llama_build_executable(test-tokenizer-1.cpp) | ||||
| # test-tokenizer-1 requires a BPE vocab. re-enable when we have one. | ||||
| #llama_test_executable (test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf) | ||||
| llama_build_executable(test-tokenizer-1-llama.cpp) | ||||
| llama_test_executable (test-tokenizer-1-llama test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf) | ||||
| #llama_test_executable(test-tokenizer-1.aquila test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf) | ||||
| llama_build_and_test_executable(test-grammar-parser.cpp) | ||||
| llama_build_and_test_executable(test-llama-grammar.cpp) | ||||
|   | ||||
| @@ -1,5 +1,6 @@ | ||||
| #include "llama.h" | ||||
| #include "common.h" | ||||
| #include "console.h" | ||||
|  | ||||
| #include <cstdio> | ||||
| #include <string> | ||||
| @@ -89,6 +90,12 @@ int main(int argc, char **argv) { | ||||
|         return 2; | ||||
|     } | ||||
|  | ||||
| #ifdef _WIN32 | ||||
|     // We need this for unicode console support | ||||
|     console::init(false, false); | ||||
|     atexit([]() { console::cleanup(); }); | ||||
| #endif | ||||
|  | ||||
|     bool success = true; | ||||
|  | ||||
|     for (const auto & test_kv : k_tests()) { | ||||
tests/test-tokenizer-1-llama.cpp (new file, 127 lines)
							| @@ -0,0 +1,127 @@ | ||||
| #include "llama.h" | ||||
| #include "common.h" | ||||
| #include "console.h" | ||||
|  | ||||
| #include <cassert> | ||||
| #include <cstdio> | ||||
| #include <cstring> | ||||
| #include <string> | ||||
| #include <codecvt> | ||||
| #include <map> | ||||
| #include <vector> | ||||
| #include <locale> | ||||
|  | ||||
| typedef int codepoint; | ||||
|  | ||||
| std::string codepoint_to_utf8(codepoint cp) { | ||||
|     std::string result; | ||||
|     if (0x00 <= cp && cp <= 0x7f) { | ||||
|         result.push_back(cp); | ||||
|     } else if (0x80 <= cp && cp <= 0x7ff) { | ||||
|         result.push_back(0xc0 | ((cp >> 6) & 0x1f)); | ||||
|         result.push_back(0x80 | (cp & 0x3f)); | ||||
|     } else if (0x800 <= cp && cp <= 0xffff) { | ||||
|         result.push_back(0xe0 | ((cp >> 12) & 0x0f)); | ||||
|         result.push_back(0x80 | ((cp >> 6) & 0x3f)); | ||||
|         result.push_back(0x80 | (cp & 0x3f)); | ||||
|     } else if (0x10000 <= cp && cp <= 0x10ffff) { | ||||
|         result.push_back(0xf0 | ((cp >> 18) & 0x07)); | ||||
|         result.push_back(0x80 | ((cp >> 12) & 0x3f)); | ||||
|         result.push_back(0x80 | ((cp >> 6) & 0x3f)); | ||||
|         result.push_back(0x80 | (cp & 0x3f)); | ||||
|     } else { | ||||
|         throw std::invalid_argument("invalid codepoint"); | ||||
|     } | ||||
|     return result; | ||||
| } | ||||
|  | ||||
| int main(int argc, char **argv) { | ||||
|     if (argc < 2) { | ||||
|         fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]); | ||||
|         return 1; | ||||
|     } | ||||
|  | ||||
|     const std::string fname = argv[1]; | ||||
|  | ||||
|     fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str()); | ||||
|  | ||||
|     llama_model * model; | ||||
|     llama_context * ctx; | ||||
|  | ||||
|     llama_backend_init(false); | ||||
|  | ||||
|     // load the vocab | ||||
|     { | ||||
|         auto lparams = llama_context_default_params(); | ||||
|  | ||||
|         lparams.vocab_only = true; | ||||
|  | ||||
|         model = llama_load_model_from_file(fname.c_str(), lparams); | ||||
|  | ||||
|         if (model == NULL) { | ||||
|             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str()); | ||||
|             return 1; | ||||
|         } | ||||
|  | ||||
|         ctx = llama_new_context_with_model(model, lparams); | ||||
|  | ||||
|         if (ctx == NULL) { | ||||
|             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str()); | ||||
|             llama_free_model(model); | ||||
|             return 1; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     GGML_ASSERT(llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM); | ||||
|  | ||||
| #ifdef _WIN32 | ||||
|     // We need this for unicode console support | ||||
|     console::init(false, false); | ||||
|     atexit([]() { console::cleanup(); }); | ||||
| #endif | ||||
|  | ||||
|     const int n_vocab = llama_n_vocab(ctx); | ||||
|  | ||||
|     for (int i = 0; i < n_vocab; ++i) { | ||||
|         std::string str = llama_detokenize_spm(ctx, std::vector<int>(1, i)); | ||||
|         std::vector<llama_token> tokens = llama_tokenize(ctx, str, false); | ||||
|         std::string check = llama_detokenize_spm(ctx, tokens); | ||||
|         if (check != str) { | ||||
|             fprintf(stderr, "%s : error: token %d detokenizes to >%s<(%llu) but tokenization of this detokenizes to >%s<(%llu)\n", | ||||
|                 __func__, i, str.c_str(), str.length(), check.c_str(), check.length()); | ||||
|             if(i != 3) | ||||
|                 return 2; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     for (codepoint cp = 0x0000; cp < 0xffff; ++cp) { | ||||
|         if (cp < 0xd800 || cp > 0xdfff) { | ||||
|             std::string str = codepoint_to_utf8(cp); | ||||
|             std::vector<llama_token> tokens = llama_tokenize(ctx, str, false); | ||||
|             std::string check = llama_detokenize_spm(ctx, tokens); | ||||
|             if (str != check) { | ||||
|                 fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n", | ||||
|                     __func__, cp, check.c_str(), check.length(), str.c_str(), str.length()); | ||||
|                 if(cp != 0 && cp != 9601) | ||||
|                     return 3; | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|     for (codepoint cp = 0x10000; cp < 0x0010ffff; ++cp) { | ||||
|         std::string str = codepoint_to_utf8(cp); | ||||
|         std::vector<llama_token> tokens = llama_tokenize(ctx, str, false); | ||||
|         std::string check = llama_detokenize_spm(ctx, tokens); | ||||
|         if (str != check) { | ||||
|             fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n", | ||||
|                 __func__, cp, check.c_str(), check.length(), str.c_str(), str.length()); | ||||
|             return 4; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     llama_free_model(model); | ||||
|     llama_free(ctx); | ||||
|  | ||||
|     llama_backend_free(); | ||||
|  | ||||
|     return 0; | ||||
| } | ||||
tests/test-tokenizer-1.cpp (deleted, 108 lines)
| @@ -1,108 +0,0 @@ | ||||
| #include "llama.h" | ||||
| #include "common.h" | ||||
|  | ||||
| #include <cassert> | ||||
| #include <cstdio> | ||||
| #include <cstring> | ||||
| #include <string> | ||||
| #include <codecvt> | ||||
| #include <map> | ||||
| #include <vector> | ||||
| #include <locale> | ||||
|  | ||||
| static std::string escape_whitespace(const std::string& text) { | ||||
|     std::string result = "\xe2\x96\x81"; | ||||
|     for (size_t offs = 0; offs < text.length(); ++offs) { | ||||
|         if (text[offs] == ' ') { | ||||
|             result += "\xe2\x96\x81"; | ||||
|         } else { | ||||
|             result += text[offs]; | ||||
|         } | ||||
|     } | ||||
|     return result; | ||||
| } | ||||
|  | ||||
| int main(int argc, char **argv) { | ||||
|     if (argc < 2) { | ||||
|         fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]); | ||||
|         return 1; | ||||
|     } | ||||
|  | ||||
|     const std::string fname = argv[1]; | ||||
|  | ||||
|     fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str()); | ||||
|  | ||||
|     llama_model * model; | ||||
|     llama_context * ctx; | ||||
|  | ||||
|     llama_backend_init(false); | ||||
|  | ||||
|     // load the vocab | ||||
|     { | ||||
|         auto lparams = llama_context_default_params(); | ||||
|  | ||||
|         lparams.vocab_only = true; | ||||
|  | ||||
|         model = llama_load_model_from_file(fname.c_str(), lparams); | ||||
|  | ||||
|         if (model == NULL) { | ||||
|             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str()); | ||||
|             return 1; | ||||
|         } | ||||
|  | ||||
|         ctx = llama_new_context_with_model(model, lparams); | ||||
|  | ||||
|         if (ctx == NULL) { | ||||
|             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str()); | ||||
|             llama_free_model(model); | ||||
|             return 1; | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     GGML_ASSERT(llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_BPE); | ||||
|  | ||||
|     const int n_vocab = llama_n_vocab(ctx); | ||||
|  | ||||
|     for (int i = 0; i < n_vocab; ++i) { | ||||
|         std::string forward = llama_token_to_piece(ctx, i); | ||||
|         std::vector<llama_token> tokens = llama_tokenize(ctx, forward, false); | ||||
|         if (tokens.size() == 1) { | ||||
|             if (i != tokens[0]) { | ||||
|                 std::string backward = llama_token_to_piece(ctx, tokens[0]); | ||||
|                 fprintf(stderr, "%s : error: token %d is string %s but bpe returns token %d %s\n", | ||||
|                     __func__, i, llama_token_to_piece(ctx, i).c_str(), tokens[0], backward.c_str()); | ||||
|                 return 2; | ||||
|             } | ||||
|         } | ||||
|     } | ||||
|  | ||||
| #ifdef _WIN32 | ||||
|     std::wstring_convert<typename std::codecvt_utf8<char16_t>, char16_t> u16converter; | ||||
|     for (char16_t ch = 0x0000; ch < 0xffff; ++ch) { | ||||
|         std::u16string u16str(1, ch); | ||||
|         std::string str = u16converter.to_bytes(u16str); | ||||
|         std::vector<llama_token> tokens = llama_tokenize(ctx, escape_whitespace(str).c_str(), false); | ||||
|         if (tokens.size() == 1) { | ||||
|             fprintf(stderr, "%s : info: %s tokenized to %d \n", | ||||
|                 __func__, str.c_str(), tokens[0]); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     std::wstring_convert<typename std::codecvt_utf8<char32_t>, char32_t> u32converter; | ||||
|     for (char32_t ch = 0x0000; ch < 0x0010ffff; ++ch) { | ||||
|         std::u32string u32str(1, ch); | ||||
|         std::string str = u32converter.to_bytes(u32str); | ||||
|         std::vector<llama_token> tokens = llama_tokenize(ctx, escape_whitespace(str).c_str(), false); | ||||
|         if (tokens.size() == 1) { | ||||
|             fprintf(stderr, "%s : info: %s tokenized to %d \n", __func__, str.c_str(), tokens[0]); | ||||
|         } | ||||
|     } | ||||
| #endif | ||||
|  | ||||
|     llama_free_model(model); | ||||
|     llama_free(ctx); | ||||
|  | ||||
|     llama_backend_free(); | ||||
|  | ||||
|     return 0; | ||||
| } | ||||
goerch