mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)
	server : fix typo in comment
		@@ -754,7 +754,7 @@ struct server_context {
         default_generation_settings_for_props = get_formated_generation(slots.front());
         default_generation_settings_for_props["seed"] = -1;
 
-        // the update_slots() logic will always submit a maximum of n_batch or n_parralel tokens
+        // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens
         // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
         {
             const int32_t n_batch = llama_n_batch(ctx);
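For context, the comment being fixed describes a batching invariant of the server: update_slots() never submits more than n_batch tokens per decode call. Below is a minimal, hypothetical sketch of that capping logic, not the actual update_slots() implementation; llama_n_batch() and the llama_token type are real llama.cpp API, while submit_pending and pending are illustrative names introduced here.

// A minimal sketch, assuming a valid llama_context; caps each submission
// at n_batch tokens, as the fixed comment describes.
#include <algorithm>
#include <cstdint>
#include <vector>

#include "llama.h"

// `pending` is a hypothetical queue of tokens awaiting decoding
static void submit_pending(llama_context * ctx, std::vector<llama_token> & pending) {
    // max tokens submitted per decode call; note this may exceed n_ctx for
    // non-causal models (e.g. BERT) where the KV cache is not used
    const int32_t n_batch = (int32_t) llama_n_batch(ctx);

    while (!pending.empty()) {
        const int32_t n_take = std::min<int32_t>(n_batch, (int32_t) pending.size());
        // ... fill a llama_batch with n_take tokens and call llama_decode(ctx, ...)
        pending.erase(pending.begin(), pending.begin() + n_take);
    }
}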