mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-04 09:32:00 +00:00 
			
		
		
		
	* ggml : build backends as libraries --------- Signed-off-by: Xiaodong Ye <xiaodong.ye@mthreads.com> Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> Co-authored-by: R0CKSTAR <xiaodong.ye@mthreads.com>
		
			
				
	
	
		
			13 lines
		
	
	
		
			250 B
		
	
	
	
		
			C++
		
	
	
	
	
	
			
		
		
	
	
			13 lines
		
	
	
		
			250 B
		
	
	
	
		
			C++
		
	
	
	
	
	
#include "ggml-threading.h"
 | 
						|
#include <mutex>
 | 
						|
 | 
						|
// Process-wide mutex backing the ggml critical-section API below.
// Serializes every caller of ggml_critical_section_start() /
// ggml_critical_section_end(); lives at namespace scope so the C-linkage
// entry points in this file can share it.
std::mutex ggml_critical_section_mutex;
// Enter the global ggml critical section, blocking until the lock is
// acquired. Every call must be paired with a later call to
// ggml_critical_section_end() on the same thread.
void ggml_critical_section_start() {
    // Name the shared mutex locally for readability; this is the same
    // object ggml_critical_section_end() unlocks.
    std::mutex & cs = ggml_critical_section_mutex;
    cs.lock();
}
// Leave the global ggml critical section entered via
// ggml_critical_section_start().
//
// Must only be called by a thread that currently holds the critical
// section: std::mutex::unlock() on a mutex the calling thread does not
// own is undefined behavior.
//
// NOTE(review): the original declared this as `(void)`, C style, while
// ggml_critical_section_start() uses `()`. In C++ the two are identical,
// so this is normalized to `()` for consistency — no ABI or caller impact.
void ggml_critical_section_end() {
    ggml_critical_section_mutex.unlock();
}