mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)
	ggml : detect SSSE3 (#2825)
* ggml : add ggml_cpu_has_ssse3
* llama : show SSSE3 in system info
This commit is contained in:
Przemysław Pawełczyk, committed by GitHub
parent 789c8c945a
commit 1591e2e590

ggml.c (8 additions)
@@ -20516,6 +20516,14 @@ int ggml_cpu_has_sse3(void) {
 #endif
 }
 
+int ggml_cpu_has_ssse3(void) {
+#if defined(__SSSE3__)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 int ggml_cpu_has_vsx(void) {
 #if defined(__POWER9_VECTOR__)
     return 1;

ggml.h (1 addition)
@@ -1944,6 +1944,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_clblast    (void);
     GGML_API int ggml_cpu_has_gpublas    (void);
     GGML_API int ggml_cpu_has_sse3       (void);
+    GGML_API int ggml_cpu_has_ssse3      (void);
     GGML_API int ggml_cpu_has_vsx        (void);
 
     //
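Detection here is purely compile-time: ggml_cpu_has_ssse3() returns 1 only if ggml.c was built with the __SSSE3__ preprocessor macro defined (typically via -mssse3 or -march=native), not by probing the CPU at runtime. A minimal sketch of querying the new flag from application code (the main() harness below is illustrative, not part of the commit):

#include <stdio.h>

#include "ggml.h"

int main(void) {
    /* Reflects the SSSE3 support ggml was compiled with,
       not a runtime CPUID check of the host processor. */
    printf("SSSE3 = %d\n", ggml_cpu_has_ssse3());
    return 0;
}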
llama.cpp (1 addition)

@@ -6194,6 +6194,7 @@ const char * llama_print_system_info(void) {
     s += "WASM_SIMD = "   + std::to_string(ggml_cpu_has_wasm_simd())   + " | ";
     s += "BLAS = "        + std::to_string(ggml_cpu_has_blas())        + " | ";
     s += "SSE3 = "        + std::to_string(ggml_cpu_has_sse3())        + " | ";
+    s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
     s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
 
     return s.c_str();
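With the llama.cpp change, the flag also appears in the feature summary that llama.cpp-based tools print at startup. A minimal sketch of reading it through the public API (assumed usage; the surrounding program is illustrative):

#include <stdio.h>

#include "llama.h"

int main(void) {
    /* Prints a line such as "... | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | ",
       now including the SSSE3 flag added by this commit. */
    printf("system info: %s\n", llama_print_system_info());
    return 0;
}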