mirror of https://github.com/ggml-org/llama.cpp.git
commit a016026a3a
* server: bench: init
* server: bench: reduce list of GPU nodes
* server: bench: fix graph, fix output artifact
* ci: bench: add mermaid in case of image cannot be uploaded
* ci: bench: more resilient, more metrics
* ci: bench: trigger build
* ci: bench: fix duration
* ci: bench: fix typo
* ci: bench: fix mermaid values, markdown generated
* typo on the step name

Co-authored-by: Xuan Son Nguyen <thichthat@gmail.com>

* ci: bench: trailing spaces
* ci: bench: move images in a details section
* ci: bench: reduce bullet point size

---------

Co-authored-by: Xuan Son Nguyen <thichthat@gmail.com>
YAML · 10 lines · 183 B
global:
  scrape_interval:     10s
  external_labels:
    llamacpp: 'server'

scrape_configs:
  - job_name: 'llama.cpp server'
    static_configs:
      - targets: ['localhost:8080']
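This config has Prometheus scrape the llama.cpp server at localhost:8080 every 10 seconds and attaches the external label llamacpp: 'server' to every sample. As a rough sketch outside the scope of this commit, one way to run Prometheus against it is with Docker Compose; the compose file below, the service name, and the host-networking choice are illustrative assumptions, and the llama.cpp server must be started with its metrics endpoint enabled (e.g. the --metrics flag) for the scrape target to return data.

# Hypothetical docker-compose.yml (not part of the repository): runs Prometheus
# with the scrape config above against a llama.cpp server already listening on
# localhost:8080 on the host.
services:
  prometheus:
    image: prom/prometheus:latest
    # host networking so the 'localhost:8080' target refers to the host machine
    network_mode: host
    volumes:
      # mount the config shown above at Prometheus's default config path
      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro

With host networking, the Prometheus UI is then reachable on its default port 9090.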