Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)

	examples, ggml : fix GCC compiler warnings (#10983)
Warning types fixed (observed under MSYS2 GCC 14.2.0):

* format '%ld' expects argument of type 'long int', but argument has type 'size_t'
* llama.cpp/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp:81:46: warning: missing initializer for member '_STARTUPINFOA::lpDesktop' [-Wmissing-field-initializers] (emitted for every struct field except the first)
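For context on the first warning class, a minimal sketch (not code from this commit): on LP64 platforms `size_t` happens to be the same width as `long`, so `%ld` appears to work, but on LLP64 targets such as MSYS2/Windows `size_t` is 64-bit while `long` is 32-bit. The portable conversion specifier for `size_t` is `%zu`:

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> v(3);
        // GCC -Wformat: format '%ld' expects argument of type 'long int',
        // but the argument has type 'size_t'
        //printf("size = %ld\n", v.size());
        printf("size = %zu\n", v.size()); // %zu matches size_t on every platform
        return 0;
    }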
@@ -15,7 +15,7 @@ static void run(
     for (size_t il = 0; il < v_input.size(); ++il) {
         // prepare output vector
         struct ggml_tensor * ctrl_out = v_output[il];
-        ggml_format_name(ctrl_out, "direction.%ld", il+1);
+        ggml_format_name(ctrl_out, "direction.%zu", il+1);

         // calculate mean vector
         struct ggml_tensor * t_layer = v_input[il];

@@ -302,7 +302,7 @@ static void run_pca(

         // prepare output vector
         struct ggml_tensor * ctrl_out = v_output[il];
-        ggml_format_name(ctrl_out, "direction.%ld", il+1);
+        ggml_format_name(ctrl_out, "direction.%zu", il+1);

         // run power_iteration
         params.i_layer = il;

@@ -265,8 +265,8 @@ struct lora_merge_ctx {
             fout.write((const char *)data.data(), data.size());
         }

-        printf("%s : merged %ld tensors with lora adapters\n", __func__, n_merged);
-        printf("%s : wrote %ld tensors to output file\n", __func__, trans.size());
+        printf("%s : merged %zu tensors with lora adapters\n", __func__, n_merged);
+        printf("%s : wrote %zu tensors to output file\n", __func__, trans.size());
     }

     void copy_tensor(struct ggml_tensor * base) {

@@ -352,7 +352,7 @@ struct lora_merge_ctx {
                 const float scale = alpha ? adapters[i]->scale * alpha / rank : adapters[i]->scale;
                 delta = ggml_scale(ctx0, delta, scale);
                 cur = ggml_add(ctx0, delta, cur);
-                printf("%s :   + merging from adapter[%ld] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
+                printf("%s :   + merging from adapter[%zu] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
                 printf("%s :     input_scale=%f calculated_scale=%f rank=%d\n", __func__, adapters[i]->scale, scale, (int) inp_b[i]->ne[0]);
             }
             cur = ggml_cast(ctx0, cur, out->type);

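Where a toolchain's printf lacks `%zu` support, a common alternative (a hypothetical variant, not what this commit does) is to cast the argument instead of changing the specifier:

    // cast to a known-width type and keep a conventional specifier;
    // note this truncates counts above 2^32-1 on LLP64 platforms
    printf("%s : merged %lu tensors with lora adapters\n", __func__, (unsigned long) n_merged);
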
@@ -78,7 +78,8 @@ void execute_command(const std::string& command, std::string& stdout_str, std::s
     }

     PROCESS_INFORMATION pi;
-    STARTUPINFOA si = { sizeof(STARTUPINFOA) };
+    STARTUPINFOA si = {};
+    si.cb = sizeof(STARTUPINFOA);
     si.dwFlags = STARTF_USESTDHANDLES;
     si.hStdOutput = stdout_write;
     si.hStdError = stderr_write;
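The second warning class comes from aggregate initialization that names only the first member: `{ sizeof(STARTUPINFOA) }` value-initializes the remaining fields, which is well-defined, but GCC's -Wmissing-field-initializers still flags each unnamed member. Empty braces plus an explicit assignment avoid the warning; a minimal sketch with an illustrative struct standing in for the Win32 one:

    struct Config {        // stand-in for a struct like STARTUPINFOA
        int    cb;         // size of the structure, in bytes
        int    dwFlags;
        void * hStdOutput;
    };

    int main() {
        // warns under -Wextra: missing initializer for member 'Config::dwFlags'
        //Config warn = { sizeof(Config) };

        Config cfg = {};         // empty braces zero-initialize all members, no warning
        cfg.cb = sizeof(Config); // then set the one field that carries a value
        return 0;
    }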
Peter