Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
			
		
		
		
Fix missing n_past in various places.
This is effectively a revert of commit cda0e4b648.
			
			
This commit is contained in:
		@@ -182,7 +182,7 @@ int main(int argc, char ** argv) {
 | 
			
		||||
            // prepare the next batch with the sampled token
 | 
			
		||||
            llama_batch_ext_clear(batch);
 | 
			
		||||
            llama_seq_id seq_id = 0;
 | 
			
		||||
            llama_batch_ext_add_text(batch, new_token_id, 0, &seq_id, 1, true);
 | 
			
		||||
            llama_batch_ext_add_text(batch, new_token_id, n_pos, &seq_id, 1, true);
 | 
			
		||||
 | 
			
		||||
            n_decode += 1;
 | 
			
		||||
        }
 | 
			
		||||
 
 | 
			
		||||
		Reference in New Issue
	
	Block a user