	llm : add Falcon support (#2717)
* llama : refactor GGUF constants into static maps
* llama : check if model architecture is known
* llama : refactor llama_model_load_internal()
* gguf : add KV constant maps
* llm : read arch-specific KVs
* convert : add dummy scores + types
* falcon : load tensor data (CPU only)
* llama : fix loading progress bar
* llama : add arch member to llama_model
* falcon : CPU inference working
* falcon : support non-40B models
* falcon : minor
* llama : minor updates

ggml-ci

* convert-falcon-hf-to-gguf.py : fix special token mapping
* llama.cpp : llama default UNK token = id 0
* llama.cpp : fix bpe tokenizer
* llama.cpp : fix the fix of bpe tokenizer
* ggml : pass eps to ggml_norm
* metal : implement RoPE (mode = 2) + avoid ggml_repeat
* ggml : ggml_repeat always creates new tensor
* falcon : copy-paste self-attention from LLaMA
* metal : print extra compute pipeline info
* falcon : minor changes (still chasing the Metal problem)
* llama.cpp : fix linefeed token
* metal : fix GELU kernel numerical stability by using precise::tanh
* metal : temporary workaround for the concurrency optimization bug
* falcon : add CUDA offloading (#2739)
* llama : better model naming and size reporting
* llama : prep new tokenizer support
* llama : advanced BPE tokenizer based on ggllm.cpp implementation
* llama : remove obsolete comment

ggml-ci

* common : remove obsolete BPE API + disable test-tokenizer-1
* llama : revert BPE special-case in llama_byte_to_token()
* cuda : add TODOs for RoPE NeoX implementation
* llama : default special tokens based on vocab type
* perplexity : add log for start of tokenization

---------

Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
Co-authored-by: slaren <slarengh@gmail.com>
Author: Georgi Gerganov
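The tokenizer unification listed above ("common : remove obsolete BPE API", "llama : default special tokens based on vocab type") is what drives the test change in the diff below: callers no longer pick a _bpe variant by hand, they call the single llama_tokenize / llama_token_to_str pair regardless of vocab type. A minimal sketch of the resulting round-trip check, reusing the helper signatures that appear in the diff (model and context setup omitted; this is an illustration, not the exact test source):

    #include <string>
    #include <vector>

    #include "llama.h"
    #include "common.h" // provides the llama_tokenize / llama_token_to_str helpers used below

    // Walk the whole vocab and verify that detokenizing a token id and
    // tokenizing the resulting string yields the same single id again.
    static int check_vocab_round_trip(llama_context * ctx) {
        const int n_vocab = llama_n_vocab(ctx);

        for (int i = 0; i < n_vocab; ++i) {
            const std::string piece = llama_token_to_str(ctx, i);
            const std::vector<llama_token> tokens = llama_tokenize(ctx, piece, false);

            // an exact round trip is a single token carrying the original id
            if (tokens.size() != 1 || tokens[0] != i) {
                return 2;
            }
        }
        return 0;
    }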
@@ -67,11 +67,13 @@ int main(int argc, char **argv) {
         }
     }
 
+    GGML_ASSERT(llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_BPE);
+
     const int n_vocab = llama_n_vocab(ctx);
 
     for (int i = 0; i < n_vocab; ++i) {
-        std::string forward = llama_token_to_str_bpe(ctx, i);
-        std::vector<llama_token> tokens = llama_tokenize_bpe(ctx, forward, false);
+        std::string forward = llama_token_to_str(ctx, i);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, forward, false);
         if (tokens.size() == 1) {
             if (i != tokens[0]) {
                 std::string backward = llama_token_to_str(ctx, tokens[0]);
@@ -79,16 +81,6 @@ int main(int argc, char **argv) {
                     __func__, i, llama_token_to_str(ctx, i).c_str(), tokens[0], backward.c_str());
                 return 2;
             }
-        } else {
-            llama_token_type type = llama_token_get_type(ctx, i);
-            if (type == LLAMA_TOKEN_TYPE_UNKNOWN || type == LLAMA_TOKEN_TYPE_CONTROL || type == LLAMA_TOKEN_TYPE_BYTE) {
-                fprintf(stderr, "%s : info: token %d is string %s and bpe returns tokens %s\n",
-                    __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
-            } else {
-                fprintf(stderr, "%s : error: token %d is string %s but bpe returns tokens %s\n",
-                    __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
-                return 2;
-            }
         }
     }
 
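One of the smaller items above, "ggml : pass eps to ggml_norm", changes the normalization API so the epsilon is supplied by the caller instead of being baked into the kernel, letting different architectures use their own value. A minimal sketch of a caller, assuming the post-change signature ggml_norm(ctx, tensor, eps) and ggml's graph API of that period (sizes and values here are arbitrary):

    #include <stdio.h>

    #include "ggml.h"

    int main() {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // a constant input has zero variance, so without eps the
        // normalization would divide by zero
        struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
        ggml_set_f32(x, 2.0f);

        // eps is now an explicit argument rather than a hard-coded constant
        struct ggml_tensor * y = ggml_norm(ctx, x, 1e-5f);

        struct ggml_cgraph gf = ggml_build_forward(y);
        ggml_graph_compute_with_ctx(ctx, &gf, /*n_threads =*/ 1);

        printf("y[0] = %f\n", ggml_get_f32_1d(y, 0)); // ~0.0, finite thanks to eps

        ggml_free(ctx);
        return 0;
    }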