Mirror of https://github.com/ggml-org/llama.cpp.git
	build : fix several cast and printf warnings (#2499)
Borislav Stanimirov (committed by GitHub)

parent 8183159cf3
commit ff966e7ca6
				
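The warnings fixed below fall into two classes: printf format specifiers that do not match their argument's type (the %lu/%li → %zu changes in the hellaswag hunks), and implicit integer narrowing made explicit with a cast (the seed, grammar-parser, and llama_eval hunks). A minimal sketch of the format-specifier class, not taken from this commit — on LLP64 platforms such as 64-bit Windows, long is 32 bits while size_t is 64, so passing a size_t for %lu is undefined behavior, not just compiler noise:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> v = {1, 2, 3};
        std::size_t n = v.size();
        // std::printf("%lu items\n", n);  // -Wformat: size_t != unsigned long on LLP64
        std::printf("%zu items\n", n);     // %zu is the dedicated size_t specifier (C99/C++11)
        return 0;
    }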
@@ -30,7 +30,7 @@ struct MyModel* create_mymodel(int argc, char ** argv) {
     fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

     if (params.seed == LLAMA_DEFAULT_SEED) {
-        params.seed = time(NULL);
+        params.seed = uint32_t(time(NULL));
     }
     fprintf(stderr, "%s: seed  = %d\n", __func__, params.seed);
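This first hunk makes a narrowing conversion explicit: time(NULL) returns time_t, typically a 64-bit signed integer, while the seed field is a 32-bit unsigned value. The grammar-parser hunk that follows treats the same warning class, passing a size_t loop index as uint32_t. A self-contained sketch, with an illustrative Params struct standing in for the real gpt_params:

    #include <cstdint>
    #include <ctime>

    struct Params {
        std::uint32_t seed;  // 32-bit, matching the seed field in the hunk above
    };

    int main() {
        Params params{};
        // params.seed = std::time(nullptr);              // warns: time_t narrows to uint32_t
        params.seed = std::uint32_t(std::time(nullptr));  // explicit: truncation is intended
        return 0;
    }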
@@ -405,7 +405,7 @@ namespace grammar_parser {
             for (size_t i = 0, end = state.rules.size(); i < end; i++) {
                 // fprintf(file, "%zu: ", i);
                 // print_rule_binary(file, state.rules[i]);
-                print_rule(file, i, state.rules[i], symbol_id_names);
+                print_rule(file, uint32_t(i), state.rules[i], symbol_id_names);
                 // fprintf(file, "\n");
             }
         } catch (const std::exception & err) {
@@ -153,7 +153,7 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) {
     }

     size_t hs_task_count = prompt_lines.size()/6;
-    fprintf(stderr, "%s : loaded %lu tasks from prompt.\n", __func__, hs_task_count);
+    fprintf(stderr, "%s : loaded %zu tasks from prompt.\n", __func__, hs_task_count);

     // This is needed as usual for LLaMA models
     bool prepend_bos = true;
@@ -178,7 +178,7 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) {
         double ending_logprob[4];
     };

-    fprintf(stderr, "%s : selecting %lu %s tasks.\n", __func__, hs_task_count, (randomize_tasks?"randomized":"the first")  );
+    fprintf(stderr, "%s : selecting %zu %s tasks.\n", __func__, hs_task_count, (randomize_tasks?"randomized":"the first")  );

     // Select and read data from prompt lines
     hs_data_t *hs_data = new hs_data_t[hs_task_count];
@@ -223,7 +223,7 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) {

             // Stop if query wont fit the ctx window
             if (query_size > (size_t)params.n_ctx) {
-                fprintf(stderr, "%s : number of tokens in query %lu > n_ctxl\n", __func__, query_size);
+                fprintf(stderr, "%s : number of tokens in query %zu > n_ctxl\n", __func__, query_size);
                 return;
             }
@@ -284,7 +284,7 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) {
         }

         // Print the accumulated accuracy mean x 100
-        printf("%li\t%.8lf\n",task_idx+1, acc/double(task_idx+1)*100.0);
+        printf("%zu\t%.8lf\n",task_idx+1, acc/double(task_idx+1)*100.0);
         fflush(stdout);
     }
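The %li case above differs slightly from the earlier %lu ones: %li expects a signed long while task_idx is a size_t, so signedness mismatches as well as width; %zu resolves both. A one-line illustration of the same fix:

    #include <cstddef>
    #include <cstdio>

    int main() {
        std::size_t task_idx = 0;
        // std::printf("%li\n", task_idx + 1);  // %li expects signed long; size_t is unsigned
        std::printf("%zu\n", task_idx + 1);     // matches both width and signedness
        return 0;
    }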
@@ -123,7 +123,7 @@ int main(int argc, char ** argv)
         // Evaluate the tokens :
         //---------------------------------

-        if ( llama_eval( ctx , tokens_list.data() , tokens_list.size() , llama_get_kv_cache_token_count( ctx ) , params.n_threads ) )
+        if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads ) )
         {
             fprintf( stderr,  "%s : failed to eval\n" , __func__ );
             return 1;
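The last hunk narrows in the opposite direction: std::vector::size() returns size_t, while the token-count parameter of llama_eval is an int, so the call site casts explicitly. A sketch against a hypothetical stand-in function (eval_tokens is not a real llama.cpp API):

    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for llama_eval, whose token count parameter is an int.
    static int eval_tokens(const int * tokens, int n_tokens) {
        (void)tokens;    // a real implementation would consume the tokens
        return n_tokens;
    }

    int main() {
        std::vector<int> tokens_list = {10, 20, 30};
        // eval_tokens(tokens_list.data(), tokens_list.size());            // warns: size_t -> int
        int n = eval_tokens(tokens_list.data(), int(tokens_list.size()));  // explicit narrowing
        std::printf("evaluated %d tokens\n", n);
        return 0;
    }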