	server : fix smart slot selection (#8020)
@@ -1594,7 +1594,7 @@ struct server_context {
                     } else {
                         std::string prompt;
                         if (task.data.contains("prompt") && task.data.at("prompt").is_string()) {
-                            json_value(task.data, "prompt", std::string());
+                            prompt = json_value(task.data, "prompt", std::string());
                         }
 
                         slot = get_available_slot(prompt);
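Why this small change matters: json_value() returns the extracted value rather than writing into prompt, so the original call discarded the result, prompt stayed empty, and get_available_slot(prompt) never saw the request's actual prompt, defeating the similarity-based ("smart") slot selection. Below is a minimal, self-contained sketch of the effect, assuming a simplified json_value helper modelled on the server's utility and the nlohmann/json library that llama.cpp bundles; the task_data payload and the print statements are illustrative, not the server's actual code.

#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

// Simplified stand-in for the server's json_value() utility: it *returns*
// the extracted value (or the default); it does not fill an out-parameter.
template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    if (body.contains(key) && !body.at(key).is_null()) {
        return body.at(key).get<T>();
    }
    return default_value;
}

int main() {
    // Hypothetical task payload, analogous to task.data in the server.
    json task_data = { {"prompt", "Once upon a time"} };

    std::string prompt;

    // Before the fix: the return value was discarded, so `prompt` stayed
    // empty and slot selection effectively saw no prompt at all.
    json_value(task_data, "prompt", std::string());
    std::cout << "without assignment: '" << prompt << "'\n";   // prints ''

    // After the fix: the extracted value is stored, so the prompt can be
    // compared against each slot's cached prompt when choosing a slot.
    prompt = json_value(task_data, "prompt", std::string());
    std::cout << "with assignment:    '" << prompt << "'\n";   // prints 'Once upon a time'

    return 0;
}

With the assignment in place, get_available_slot(prompt) receives the real request prompt and can prefer the idle slot whose cached prompt best matches it, instead of always matching against an empty string.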
Author: sasha0552