mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	server : replace behave with pytest (#10416)
* server : replace behave with pytest
* fix test on windows
* misc
* add more tests
* more tests
* styling
* log less, fix embd test
* added all sequential tests
* fix coding style
* fix save slot test
* add parallel completion test
* fix parallel test
* remove feature files
* update test docs
* no cache_prompt for some tests
* add test_cache_vs_nocache_prompt
This commit is contained in:
		
							
								
								
									
										15
									
								
								examples/server/tests/conftest.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								examples/server/tests/conftest.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | ||||
| import pytest | ||||
| from utils import * | ||||
|  | ||||
|  | ||||
# ref: https://stackoverflow.com/questions/22627659/run-code-before-and-after-each-test-in-py-test
@pytest.fixture(autouse=True)
def stop_server_after_each_test():
    """Autouse teardown fixture: stop every running server once a test finishes."""
    # No setup work is needed before the test body runs.
    yield
    # Snapshot the registry first: server.stop() mutates server_instances,
    # and iterating the live set would raise 'Set changed size during iteration'.
    running = set(server_instances)
    for srv in running:
        srv.stop()
		Reference in New Issue
	
	Block a user
	 Xuan Son Nguyen
					Xuan Son Nguyen