mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-04 09:32:00 +00:00 
			
		
		
		
	* server : replace behave with pytest * fix test on windows * misc * add more tests * more tests * styling * log less, fix embd test * added all sequential tests * fix coding style * fix save slot test * add parallel completion test * fix parallel test * remove feature files * update test docs * no cache_prompt for some tests * add test_cache_vs_nocache_prompt
		
			
				
	
	
		
			35 lines
		
	
	
		
			791 B
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			35 lines
		
	
	
		
			791 B
		
	
	
	
		
			Python
		
	
	
	
	
	
import pytest
 | 
						|
from utils import *
 | 
						|
 | 
						|
# Module-level server handle shared by all tests in this file; the autouse
# fixture below replaces it with a fresh preset instance per module run.
server = ServerPreset.tinyllama2()
@pytest.fixture(scope="module", autouse=True)
def create_server():
    """Reset the module-level `server` to a fresh tinyllama2 preset.

    scope="module" + autouse=True: runs once per test module without any
    test having to request it, so every module starts from a clean,
    not-yet-started server configuration.
    """
    global server
    server = ServerPreset.tinyllama2()
def test_server_start_simple():
    """Smoke test: the server boots and its /health endpoint answers 200."""
    global server
    server.start()
    health = server.make_request("GET", "/health")
    assert health.status_code == 200
def test_server_props():
    """/props responds 200 and reports the slot count the server runs with."""
    global server
    server.start()
    props = server.make_request("GET", "/props")
    assert props.status_code == 200
    assert props.body["total_slots"] == server.n_slots
def test_server_models():
    """/models lists exactly one model, identified by the configured alias."""
    global server
    server.start()
    models = server.make_request("GET", "/models")
    assert models.status_code == 200
    listed = models.body["data"]
    assert len(listed) == 1
    assert listed[0]["id"] == server.model_alias