kompute : llama-bench support and ggml_cpu_has_kompute() (#5226)
Author: Jared Van Bortel
@@ -563,6 +563,7 @@ struct test {
     static const bool cuda;
     static const bool opencl;
     static const bool vulkan;
+    static const bool kompute;
     static const bool metal;
     static const bool gpu_blas;
     static const bool blas;
@@ -647,6 +648,9 @@ struct test {
         if (vulkan) {
             return "Vulkan";
         }
+        if (kompute) {
+            return "Kompute";
+        }
         if (metal) {
             return "Metal";
         }
@@ -662,7 +666,7 @@ struct test {
     static const std::vector<std::string> & get_fields() {
         static const std::vector<std::string> fields = {
             "build_commit", "build_number",
-            "cuda", "opencl", "vulkan", "metal", "gpu_blas", "blas",
+            "cuda", "opencl", "vulkan", "kompute", "metal", "gpu_blas", "blas",
             "cpu_info", "gpu_info",
             "model_filename", "model_type", "model_size", "model_n_params",
             "n_batch", "n_threads", "type_k", "type_v",
@@ -686,8 +690,9 @@ struct test {
             field == "avg_ns" || field == "stddev_ns") {
             return INT;
         }
-        if (field == "cuda" || field == "opencl"  || field == "vulkan"|| field == "metal" || field == "gpu_blas" || field == "blas" ||
-            field == "f16_kv" || field == "no_kv_offload" || field == "mul_mat_q") {
+        if (field == "cuda" || field == "opencl"  || field == "vulkan" || field == "kompute" || field == "metal" ||
+            field == "gpu_blas" || field == "blas" || field == "f16_kv" || field == "no_kv_offload" ||
+            field == "mul_mat_q") {
             return BOOL;
         }
         if (field == "avg_ts" || field == "stddev_ts") {
@@ -714,7 +719,8 @@ struct test {
         }
         std::vector<std::string> values = {
             build_commit, std::to_string(build_number),
-            std::to_string(cuda), std::to_string(opencl), std::to_string(vulkan), std::to_string(metal), std::to_string(gpu_blas), std::to_string(blas),
+            std::to_string(cuda), std::to_string(opencl), std::to_string(vulkan), std::to_string(kompute),
+            std::to_string(metal), std::to_string(gpu_blas), std::to_string(blas),
             cpu_info, gpu_info,
             model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
             std::to_string(n_batch), std::to_string(n_threads), ggml_type_name(type_k), ggml_type_name(type_v),
@@ -743,6 +749,7 @@ const int         test::build_number = LLAMA_BUILD_NUMBER;
 const bool        test::cuda         = !!ggml_cpu_has_cublas();
 const bool        test::opencl       = !!ggml_cpu_has_clblast();
 const bool        test::vulkan       = !!ggml_cpu_has_vulkan();
+const bool        test::kompute      = !!ggml_cpu_has_kompute();
 const bool        test::metal        = !!ggml_cpu_has_metal();
 const bool        test::gpu_blas     = !!ggml_cpu_has_gpublas();
 const bool        test::blas         = !!ggml_cpu_has_blas();
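As a quick sanity check outside of llama-bench, the new flag can also be queried directly. The sketch below is illustrative only and not part of this commit; it assumes the ggml_cpu_has_*() helpers shown in the diff (including the new ggml_cpu_has_kompute()) are declared in ggml.h and return a non-zero int when the corresponding backend is compiled in.

#include <cstdio>
#include "ggml.h"

int main() {
    // Print the same backend flags that the test struct above records,
    // including the newly added kompute flag.
    std::printf("vulkan:  %d\n", ggml_cpu_has_vulkan());
    std::printf("kompute: %d\n", ggml_cpu_has_kompute());
    std::printf("metal:   %d\n", ggml_cpu_has_metal());
    return 0;
}

llama-bench itself converts these ints to bools with !!, as shown in the last hunk, before reporting them as the cuda/opencl/vulkan/kompute/metal columns.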