# tests with SPM tokenizer
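# usage: python3 <this script> <dir_tokenizer> [--fname-tok <text file>]
# (the tokenizer directory must contain a SentencePiece 'tokenizer.model' file)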

import os
import sys
import argparse

from sentencepiece import SentencePieceProcessor

parser = argparse.ArgumentParser()
parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
parser.add_argument("--fname-tok",   help="path to a text file to tokenize")
args = parser.parse_args()

dir_tokenizer = args.dir_tokenizer

tokenizer = SentencePieceProcessor(dir_tokenizer + '/tokenizer.model')

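# test strings covering empty/whitespace-only input, leading spaces, punctuation,
# non-Latin scripts and emoji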
tests = [
        "",
        " ",
        "  ",
        "   ",
        "\t",
        "\n",
        "\t\n",
        "Hello world",
        " Hello world",
        "Hello World",
        " Hello World",
        " Hello World!",
        "Hello, world!",
        " Hello, world!",
        " this is 🦙.cpp",
        "w048 7tuijk dsdfhu",
        "нещо на Български",
        "កាន់តែពិសេសអាចខលចេញ",
        "🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
        "Hello",
        " Hello",
        "  Hello",
        "   Hello",
        "    Hello",
        "    Hello\n    Hello",
    ]


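# print the token ids and the round-tripped (decoded) text for each test string,
# both with and without the BOS token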
for text in tests:
    print('text: ', text)
    print('\nwith bos:')
    print(tokenizer.encode(text, add_bos=True))
    print(tokenizer.decode(tokenizer.encode(text, add_bos=True)))
    print('\nwithout bos:')
    print(tokenizer.encode(text, add_bos=False))
    print(tokenizer.decode(tokenizer.encode(text, add_bos=False)))

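# spot-check a few token ids: the raw piece string vs. the decoded text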
print("'" + tokenizer.id_to_piece(15043) + "'") # '_Hello'
print("'" + tokenizer.id_to_piece(29871) + "'") # '_'
print("'" + tokenizer.decode([15043]) + "'")        # 'Hello'
print("'" + tokenizer.decode([15043, 15043]) + "'") # 'Hello Hello'
print("'" + tokenizer.decode([29871, 15043]) + "'")               # ' Hello'
print("'" + tokenizer.decode([29871, 15043, 29871, 15043]) + "'") # ' Hello  Hello'

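# print each test string and its token ids as a C++ initializer-list entry,
# for pasting into the C++ tokenizer tests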
print("\n\ntests for C++:\n")
for text in tests:
    res = tokenizer.encode(text, add_bos=False)

    k = text.replace('\n', '\\n')
    k = k.replace('\t', '\\t')
    k = '"' + k + '"'
    print("{ %-24s, { " % k, end='')
    for x in res:
        print("%7d," % x, end='')
    print(" }, },")

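# show how 'hello' / 'world' tokenize on their own, with a leading space, and combined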
print(tokenizer.encode('hello'))
print(tokenizer.encode('world'))
print(tokenizer.encode(' world'))
print(tokenizer.encode('hello world'))

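# optionally tokenize a whole text file and write the token ids to '<fname_tok>.tok'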
fname_tok = args.fname_tok
if fname_tok:
    print('tokenizing file: ', fname_tok)
    fname_out = fname_tok + '.tok'
    with open(fname_tok, 'r') as f:
        lines = f.readlines()
        s = ''.join(lines)
        res = tokenizer.encode(s, add_bos=True)
        # write to file
        with open(fname_out, 'w') as f:
            for x in res:
                f.write(str(x) + ' ')
            f.write('\n')
        print('len(res): ', len(res))
        print('len(lines): ', len(lines))
    print('results written to: ', fname_out)