mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	fix llama_batch_ext_init_from_text
This commit is contained in:
@@ -113,7 +113,7 @@ int main(int argc, char ** argv) {
     struct common_sampler * smpl = common_sampler_init(model_tgt, params.sampling);

     // eval the prompt
-    llama_batch_ext_ptr batch(llama_batch_ext_init_from_text(inp.data(), inp.size() - 1, 0, 0));
+    llama_batch_ext_ptr batch(llama_batch_ext_init_from_text(inp.data(), inp.size() - 1, 0, 0, true));
     llama_decode_ext(ctx_tgt, batch.get());

     // note: keep the last token separate!
		Reference in New Issue
	
	Block a user
	 Xuan Son Nguyen
					Xuan Son Nguyen