Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-30 08:42:00 +00:00
			
		
		
		
	simple-chat : only add bos on first prompt (#10129)
This commit is contained in:
```diff
@@ -96,7 +96,7 @@ int main(int argc, char ** argv) {
         // tokenize the prompt
         const int n_prompt_tokens = -llama_tokenize(model, prompt.c_str(), prompt.size(), NULL, 0, true, true);
         std::vector<llama_token> prompt_tokens(n_prompt_tokens);
-        if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, true) < 0) {
+        if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), llama_get_kv_cache_used_cells(ctx) == 0, true) < 0) {
             GGML_ABORT("failed to tokenize the prompt\n");
         }
```
		Reference in New Issue
	
	Block a user
	 Diego Devesa
					Diego Devesa