Mirror of https://github.com/ggml-org/llama.cpp.git — synced 2025-10-31 08:51:55 +00:00
			
		
		
		
	fix llama_batch_ext_init_from_text
This commit is contained in:
		| @@ -134,7 +134,7 @@ static bool run(llama_context * ctx, const common_params & params) { | ||||
|  | ||||
|     std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos); | ||||
|  | ||||
|     llama_batch_ext_ptr batch(llama_batch_ext_init_from_text(tokens.data(), tokens.size(), 0, 0)); | ||||
|     llama_batch_ext_ptr batch(llama_batch_ext_init_from_text(tokens.data(), tokens.size(), 0, 0, true)); | ||||
|     if (llama_decode_ext(ctx, batch.get())) { | ||||
|         LOG_ERR("%s : failed to eval\n", __func__); | ||||
|         return false; | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
Author: Xuan Son Nguyen