mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	examples: fix Android example so generation can continue past the first completion (#8621)
When generation ends, `completion_loop()` should return `nullptr` (mapped to Java `null`), not an empty string.
This commit is contained in:
@@ -409,7 +409,7 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop(

     const auto n_cur = env->CallIntMethod(intvar_ncur, la_int_var_value);
     if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) {
-        return env->NewStringUTF("");
+        return nullptr;
     }

     auto new_token_chars = llama_token_to_piece(context, new_token_id);
|   | ||||
		Reference in New Issue
	
	Block a user
	 devojony
					devojony