	server : fix parallel generation with very small batch sizes
@@ -753,13 +753,13 @@ struct server_context {
         default_generation_settings_for_props = get_formated_generation(slots.front());
         default_generation_settings_for_props["seed"] = -1;
 
-        // the update_slots() logic will always submit a maximum of n_batch tokens
+        // the update_slots() logic will always submit a maximum of n_batch or n_parralel tokens
         // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
         {
             const int32_t n_batch = llama_n_batch(ctx);
 
             // only a single seq_id per token is needed
-            batch = llama_batch_init(n_batch, 0, 1);
+            batch = llama_batch_init(std::max(n_batch, params.n_parallel), 0, 1);
         }
 
         metrics.init();
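The change sizes the shared batch by std::max(n_batch, params.n_parallel) instead of n_batch alone. During parallel generation, update_slots() appends one sampled token per active slot to this batch before calling llama_decode(), so with a very small batch size (n_batch < n_parallel) a buffer allocated for only n_batch tokens would be too small to hold one token per slot. Below is a minimal sketch of that sizing logic, assuming the public llama.cpp C API; the function name, placeholder token ids, and positions are illustrative assumptions, not part of the commit.

```cpp
// Illustrative sketch only (not server.cpp): shows why the shared batch needs
// room for at least n_parallel tokens when n_batch is very small.
#include "llama.h"

#include <algorithm>
#include <cstdint>

static void decode_one_token_per_slot(llama_context * ctx, int32_t n_parallel) {
    const int32_t n_batch = llama_n_batch(ctx);

    // the fix from this commit: size the batch for a full prompt chunk (n_batch)
    // or for one token from every parallel slot (n_parallel), whichever is larger
    llama_batch batch = llama_batch_init(std::max(n_batch, n_parallel), 0, 1);

    // during parallel generation each active slot contributes exactly one token,
    // tagged with its own sequence id (slot i == sequence i)
    for (int32_t i = 0; i < n_parallel; ++i) {
        batch.token   [batch.n_tokens]    = 0;    // placeholder token id (assumption)
        batch.pos     [batch.n_tokens]    = 0;    // placeholder position (assumption)
        batch.n_seq_id[batch.n_tokens]    = 1;    // only a single seq_id per token is needed
        batch.seq_id  [batch.n_tokens][0] = i;
        batch.logits  [batch.n_tokens]    = true; // request logits so the slot can sample
        batch.n_tokens++;
    }

    // with the old sizing (n_batch only), n_parallel > n_batch would overflow the buffer here
    llama_decode(ctx, batch);

    llama_batch_free(batch);
}
```

Allocating for the larger of the two values lets the same buffer serve both prompt processing (up to n_batch tokens per call) and parallel decoding (up to one token per slot) without reallocation.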