mirror of https://github.com/ggml-org/llama.cpp.git

Apply suggestions from code review
Author: Xuan-Son Nguyen
@@ -147,7 +147,7 @@ int main(int argc, char ** argv) {
                     llama_batch_ext_clear(batch);
 
                     for (int j = 0; j < pl; ++j) {
-                        llama_batch_ext_add_text(batch, 0, pp + i, &j, 1, false);
+                        llama_batch_ext_add_text(batch, 0, pp + i, &j, 1, true);
                     }
 
                     if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
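For context, a hedged sketch of the call being changed: judging from the call sites in this diff, the extended batch API takes the shape (batch, token, pos, seq_ids, n_seq_ids, output), with the trailing boolean requesting output (logits) for the token; this commit flips that flag to true for the prompt tokens. The parameter meanings below are assumptions inferred from the call sites, not the library's documented signature.

    // Assumed usage pattern around this hunk (parameter meanings are
    // inferred from the call sites, not confirmed against the headers).
    llama_batch_ext_clear(batch);               // reuse one batch per step
    for (int j = 0; j < pl; ++j) {
        // one token per parallel slot j, at position pp + i;
        // trailing 'true' is assumed to request logits for this token
        llama_batch_ext_add_text(batch, 0, pp + i, &j, 1, true);
    }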
@@ -196,7 +196,7 @@ int main(int argc, char ** argv) {
             i_batch[i] = llama_batch_ext_get_n_tokens(batch);
 
             // push this new token for next evaluation
-            llama_batch_ext_add_text(batch, new_token_id, n_cur, &i, 1, false);
+            llama_batch_ext_add_text(batch, new_token_id, n_cur, &i, 1, true);
 
             n_decode += 1;
         }
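The same flag is flipped in the generation loop. A brief annotated sketch, assuming llama_batch_ext_get_n_tokens returns the number of tokens currently queued (and hence the index the next token will occupy):

    // i_batch[i] remembers where sequence i's next logits will be found.
    i_batch[i] = llama_batch_ext_get_n_tokens(batch);
    // Queue the freshly sampled token at position n_cur for sequence i;
    // 'true' (changed from 'false') is assumed to request its logits so
    // sampling can continue after the next decode call.
    llama_batch_ext_add_text(batch, new_token_id, n_cur, &i, 1, true);
    n_decode += 1;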
@@ -670,7 +670,6 @@ int main(int argc, char ** argv) {
                 LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());
 
                 auto batch = llama_batch_ext_ptr::init_from_text(&embd[i], n_eval, n_past, 0, true);
-                llama_batch_ext_set_output_last(batch.get());
                 if (llama_decode_ext(ctx, batch.get())) {
                     LOG_ERR("%s : failed to eval\n", __func__);
                     return 1;
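The last hunk drops an explicit llama_batch_ext_set_output_last call. A plausible reading, sketched below, is that the trailing true already passed to init_from_text marks the final token for output, making the separate call redundant; this interpretation of the parameter is an assumption from the call site, not documented behaviour.

    // Assumed: init_from_text(tokens, n_tokens, pos0, seq_id, output_last)
    // returns an owning wrapper (llama_batch_ext_ptr) and, with
    // output_last = true, already requests logits for the last token,
    // so the removed set_output_last call would be a no-op.
    auto batch = llama_batch_ext_ptr::init_from_text(&embd[i], n_eval, n_past, 0, true);
    if (llama_decode_ext(ctx, batch.get())) {
        LOG_ERR("%s : failed to eval\n", __func__);
        return 1;
    }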