mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	infill : add new example + extend server API (#3296)
* vvhg-code-infill (#1)
* infill in separate example (#2)
* reverted changes to main and added infill example
* cleanup
* naming improvement
* make : add missing blank line
* fix missing semicolon
* brought infill up to current main code
* cleanup

---------

Co-authored-by: Cebtenzzre <cebtenzzre@gmail.com>
This commit is contained in:
		
							
								
								
									
										20
									
								
								llama.cpp
									
									
									
									
									
								
							
							
						
						
									
										20
									
								
								llama.cpp
									
									
									
									
									
								
							| @@ -1076,6 +1076,10 @@ struct llama_vocab { | ||||
|     id special_pad_id = -1; | ||||
|  | ||||
|     id linefeed_id = 13; | ||||
|     id special_prefix_id = 32007; | ||||
|     id special_middle_id = 32009; | ||||
|     id special_suffix_id = 32008; | ||||
|     id special_eot_id = 32010; | ||||
|  | ||||
|     int find_bpe_rank(std::string token_left, std::string token_right) const { | ||||
|         replace_all(token_left,  " ",  "\u0120"); | ||||
| @@ -7489,6 +7493,22 @@ llama_token llama_token_eos(const struct llama_context * ctx) { | ||||
| llama_token llama_token_nl(const struct llama_context * ctx) { | ||||
|     return ctx->model.vocab.linefeed_id; | ||||
| } | ||||
| llama_token llama_token_prefix(const struct llama_context * ctx) { | ||||
|     return ctx->model.vocab.special_prefix_id; | ||||
| } | ||||
|  | ||||
| llama_token llama_token_middle(const struct llama_context * ctx) { | ||||
|     return ctx->model.vocab.special_middle_id; | ||||
| } | ||||
|  | ||||
| llama_token llama_token_suffix(const struct llama_context * ctx) { | ||||
|     return ctx->model.vocab.special_suffix_id; | ||||
| } | ||||
|  | ||||
| llama_token llama_token_eot(const struct llama_context * ctx) { | ||||
|     return ctx->model.vocab.special_eot_id; | ||||
| } | ||||
|  | ||||
|  | ||||
| int llama_tokenize( | ||||
|     const struct llama_model * model, | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 vvhg1
					vvhg1