mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-04 09:32:00 +00:00)
* minor : code style
* server : fix prompt similarity calculation
* server : initial host-memory prompt caching
* cont
* server : refactor
* cont
* cont : make the server task of the slot const
* cont : minor [no ci]
* server : cache prompts and checkpoints only for completion tasks
* server : improve prompt caching logic
* cont : fix check for number of cached prompts [no ci]
* server : improve caching logic, add -cram CLI arg
* server : print prompt mismatch info
* cont : better naming [no ci]
* server : improve prompt cache loading logic
* server : add option to debug the slot contents (#16482)
  * server : add option to debug the slot contents
  * Update tools/server/server.cpp
* server : add option to disable prompt cache

Co-authored-by: Xuan-Son Nguyen <son@huggingface.co>

(A hedged sketch of a test exercising this prompt cache follows the file listing below.)
102 lines · 2.8 KiB · Python

import pytest
import requests
from utils import *

server = ServerPreset.tinyllama2()


@pytest.fixture(scope="session", autouse=True)
def do_something():
    # this will be run once per test session, before any tests
    ServerPreset.load_all()


@pytest.fixture(autouse=True)
def create_server():
    global server
    server = ServerPreset.tinyllama2()


def test_server_start_simple():
    global server
    server.start()
    res = server.make_request("GET", "/health")
    assert res.status_code == 200


def test_server_props():
    global server
    server.start()
    res = server.make_request("GET", "/props")
    assert res.status_code == 200
    assert ".gguf" in res.body["model_path"]
    assert res.body["total_slots"] == server.n_slots
    default_val = res.body["default_generation_settings"]
    assert server.n_ctx is not None and server.n_slots is not None
    assert default_val["n_ctx"] == server.n_ctx / server.n_slots
    assert default_val["params"]["seed"] == server.seed


def test_server_models():
    global server
    server.start()
    res = server.make_request("GET", "/models")
    assert res.status_code == 200
    assert len(res.body["data"]) == 1
    assert res.body["data"][0]["id"] == server.model_alias


def test_server_slots():
    global server

    # without slots endpoint enabled, this should return error
    server.server_slots = False
    server.start()
    res = server.make_request("GET", "/slots")
    assert res.status_code == 501 # ERROR_TYPE_NOT_SUPPORTED
    assert "error" in res.body
    server.stop()

    # with slots endpoint enabled, this should return slots info
    server.server_slots = True
    server.n_slots = 2
    server.start()
    res = server.make_request("GET", "/slots")
    assert res.status_code == 200
    assert len(res.body) == server.n_slots
    assert server.n_ctx is not None and server.n_slots is not None
    assert res.body[0]["n_ctx"] == server.n_ctx / server.n_slots
    assert "params" not in res.body[0]


def test_load_split_model():
    global server
    server.model_hf_repo = "ggml-org/models"
    server.model_hf_file = "tinyllamas/split/stories15M-q8_0-00001-of-00003.gguf"
    server.model_alias = "tinyllama-split"
    server.start()
    res = server.make_request("POST", "/completion", data={
        "n_predict": 16,
        "prompt": "Hello",
        "temperature": 0.0,
    })
    assert res.status_code == 200
    assert match_regex("(little|girl)+", res.body["content"])


def test_no_webui():
    global server
    # default: webui enabled
    server.start()
    url = f"http://{server.server_host}:{server.server_port}"
    res = requests.get(url)
    assert res.status_code == 200
    assert "<!doctype html>" in res.text
    server.stop()

    # with --no-webui
    server.no_webui = True
    server.start()
    res = requests.get(url)
    assert res.status_code == 404
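

The commit log at the top of this page introduces host-memory prompt caching and a -cram CLI arg. As an illustration only, a smoke test in the style of the file above might look like the sketch below. The cache_ram field on the harness is an assumption about how the flag could be exposed, not something confirmed by this page, and the assertions are deliberately weak because cache-hit reporting depends on server internals.

def test_prompt_cache_smoke():
    global server
    # hypothetical: assume the harness exposes the -cram flag as a cache_ram field
    server.cache_ram = 256  # assumed to be MiB of host memory for cached prompts
    server.start()
    data = {
        "n_predict": 8,
        "prompt": "Hello, this is a fairly long prompt that the server may cache",
        "temperature": 0.0,
    }
    # first request populates the host-memory prompt cache
    res = server.make_request("POST", "/completion", data=data)
    assert res.status_code == 200
    # an identical second request should be able to reuse the cached prompt;
    # we only assert that it succeeds, since cache-hit details are not
    # observable through anything shown on this page
    res = server.make_request("POST", "/completion", data=data)
    assert res.status_code == 200

Like the tests in the file, such a sketch would run under pytest using the ServerPreset fixtures defined above.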