	llama_batch_ext_ptr::from_text/embd
@@ -101,7 +101,7 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token
        if (n_eval > n_batch) {
            n_eval = n_batch;
        }
-       llama_batch_ext_ptr batch(llama_batch_ext_init_from_text(&tokens[i], n_eval, *n_past, 0, true));
+       auto batch = llama_batch_ext_ptr::from_text(&tokens[i], n_eval, *n_past, 0, true);
        if (llama_decode_ext(ctx_llama, batch.get())) {
            LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
            return false;
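The change replaces the explicit constructor call around llama_batch_ext_init_from_text() with the new static helper llama_batch_ext_ptr::from_text() (a matching from_embd() helper is implied by the commit title). As a rough illustration only, the helper could take the shape sketched below; this assumes llama_batch_ext_ptr is a std::unique_ptr-style RAII wrapper over llama_batch_ext and that a llama_batch_ext_free() counterpart exists, neither of which is shown in this hunk, and the parameter names are guesses based on the call site above.

// Illustrative sketch only -- not the actual llama.cpp header.
// Assumptions: llama_batch_ext_ptr is a unique_ptr-like owner of llama_batch_ext,
// and llama_batch_ext_free() is the matching free function. Parameter names
// (pos0, seq_id, output_last) are inferred from the call site in the hunk.
#include <memory>
#include "llama.h"   // header from the experimental batch-ext branch

struct llama_batch_ext_deleter {
    void operator()(llama_batch_ext * batch) const { llama_batch_ext_free(batch); }
};

struct llama_batch_ext_ptr : std::unique_ptr<llama_batch_ext, llama_batch_ext_deleter> {
    using std::unique_ptr<llama_batch_ext, llama_batch_ext_deleter>::unique_ptr;

    // Wraps llama_batch_ext_init_from_text() so call sites get an owning
    // smart pointer in a single expression instead of the
    // constructor-around-init pattern removed in this commit.
    static llama_batch_ext_ptr from_text(llama_token * tokens, int32_t n_tokens,
                                         llama_pos pos0, llama_seq_id seq_id,
                                         bool output_last) {
        return llama_batch_ext_ptr(
            llama_batch_ext_init_from_text(tokens, n_tokens, pos0, seq_id, output_last));
    }
};

With such a helper in place, call sites read auto batch = llama_batch_ext_ptr::from_text(...) and pass batch.get() to llama_decode_ext(), exactly as in the added line of the hunk.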