Mirror of https://github.com/ggml-org/llama.cpp.git
	server : add more test cases (#10569)
* server : add split model test
* add test speculative
* add invalid cases
@@ -8,6 +8,7 @@ def create_server():
     global server
     server = ServerPreset.tinyllama_infill()
 
+
 def test_infill_without_input_extra():
     global server
     server.start()
@@ -19,6 +20,7 @@ def test_infill_without_input_extra():
     assert res.status_code == 200
     assert match_regex("(One|day|she|saw|big|scary|bird)+", res.body["content"])
 
+
 def test_infill_with_input_extra():
     global server
     server.start()
@@ -33,3 +35,23 @@ def test_infill_with_input_extra():
     })
     assert res.status_code == 200
     assert match_regex("(cuts|Jimmy|mom|came|into|the|room)+", res.body["content"])
+
+
+@pytest.mark.parametrize("input_extra", [
+    {},
+    {"filename": "ok"},
+    {"filename": 123},
+    {"filename": 123, "text": "abc"},
+    {"filename": 123, "text": 456},
+])
+def test_invalid_input_extra_req(input_extra):
+    global server
+    server.start()
+    res = server.make_request("POST", "/infill", data={
+        "prompt": "Complete this",
+        "input_extra": [input_extra],
+        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n    int n_threads = llama_",
+        "input_suffix": "}\n",
+    })
+    assert res.status_code == 400
+    assert "error" in res.body
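The last hunk exercises validation of the `input_extra` field: entries whose `filename` or `text` are not strings are expected to be rejected with HTTP 400. For reference, here is a minimal standalone sketch of the same request made by hand with `requests`, assuming a llama-server instance with an infill-capable model is already listening on http://127.0.0.1:8080 (the test's `server.make_request` helper performs the equivalent HTTP call internally):

# Minimal sketch, not part of the commit: reproduce one invalid input_extra
# case against a locally running llama-server. The URL and running server
# are assumptions; the request fields and the 400 expectation mirror the test.
import requests

res = requests.post(
    "http://127.0.0.1:8080/infill",
    json={
        "prompt": "Complete this",
        # "filename" and "text" should be strings; integers are invalid on purpose
        "input_extra": [{"filename": 123, "text": 456}],
        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\n"
                        "int main() {\n    int n_threads = llama_",
        "input_suffix": "}\n",
    },
)
assert res.status_code == 400   # malformed input_extra is rejected
assert "error" in res.json()    # response body carries an error object

To run only this new test from the server test suite, something like `pytest -v -k test_invalid_input_extra_req` executed from the tests directory should work.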
Xuan Son Nguyen