Mirror of https://github.com/ggml-org/llama.cpp.git, commit b12fa0d1c1
* cmake : fix build when .git does not exist
* cmake : simplify BUILD_INFO target
* cmake : add missing dependencies on BUILD_INFO
* build : link against build info instead of compiling against it
* zig : make build info a .cpp source instead of a header

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>

* cmake : revert change to CMP0115

---------

Co-authored-by: Matheus C. França <matheus-catarino@hotmail.com>
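The core of the change: build metadata now lives in a compiled translation unit that consumers link against, instead of a header they compile against, so a new commit hash only recompiles one file (and a tarball build without .git can fall back to placeholder values). A minimal sketch of the pattern, with illustrative names and values rather than the repo's actual generated identifiers:

// build-info.h -- declarations only; including this does not embed the
// metadata, so consumers do not rebuild when the hash changes
extern int          BUILD_NUMBER;
extern const char * BUILD_COMMIT;

// build-info.cpp -- generated by the build system (e.g. from git describe);
// this is the only translation unit recompiled when the metadata changes
int          BUILD_NUMBER = 1234;       // illustrative value
const char * BUILD_COMMIT = "b12fa0d1"; // illustrative value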
		
			
				
	
	
		
105 lines · 2.8 KiB · C++
#include "common.h"
#include "llama.h"

// explicit standard includes for self-containedness; common.h may already
// provide these transitively
#include <algorithm> // std::min
#include <ctime>
#include <random>    // std::mt19937
#include <tuple>     // std::tie

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

int main(int argc, char ** argv) {
    gpt_params params;

    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

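    // run in embedding mode: compute and print the prompt's embedding vector
    // instead of generating text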
    params.embedding = true;

    print_build_info();

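    // fall back to a time-based seed when none was specified on the command line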
    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed  = %u\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

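    // initialize the llama/ggml backend (params.numa enables NUMA optimizations)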
    llama_backend_init(params.numa);

    llama_model * model;
    llama_context * ctx;

    // load the model
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    const int n_ctx_train = llama_n_ctx_train(model);
    const int n_ctx = llama_n_ctx(ctx);

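    // warn if the requested context size exceeds the model's training context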
    if (n_ctx > n_ctx_train) {
        fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
                __func__, n_ctx_train, n_ctx);
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s\n", get_system_info(params).c_str());
    }

    int n_past = 0;

    // tokenize the prompt
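    // (the third argument requests a leading BOS token)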
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
        }
        fprintf(stderr, "\n");
    }

    if (embd_inp.size() > (size_t)n_ctx) {
        fprintf(stderr, "%s: error: prompt is longer than the context window (%zu tokens, n_ctx = %d)\n",
                __func__, embd_inp.size(), n_ctx);
        return 1;
    }

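    // evaluate the prompt in chunks of at most n_batch tokens;
    // n_past tracks how many tokens the context has seen so far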
    while (!embd_inp.empty()) {
        int n_tokens = std::min(params.n_batch, (int) embd_inp.size());
        if (llama_decode(ctx, llama_batch_get_one(embd_inp.data(), n_tokens, n_past, 0))) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return 1;
        }
        n_past += n_tokens;
        embd_inp.erase(embd_inp.begin(), embd_inp.begin() + n_tokens);
    }

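    // the embedding of the evaluated sequence can now be read back from the context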
    const int n_embd = llama_n_embd(model);
    const auto * embeddings = llama_get_embeddings(ctx);

    for (int i = 0; i < n_embd; i++) {
        printf("%f ", embeddings[i]);
    }
    printf("\n");

    llama_print_timings(ctx);
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}
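As written, the example prints the raw embedding vector, one float per dimension. Downstream code typically compares such vectors with cosine similarity; here is a minimal, self-contained sketch (the helper name and the toy vectors are illustrative, not part of this file):

#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical helper: cosine similarity between two embedding vectors of
// equal dimension, e.g. as printed by the example above.
static double cosine_similarity(const std::vector<float> & a, const std::vector<float> & b) {
    double dot = 0.0, na = 0.0, nb = 0.0;
    for (size_t i = 0; i < a.size(); i++) {
        dot += (double) a[i] * b[i];
        na  += (double) a[i] * a[i];
        nb  += (double) b[i] * b[i];
    }
    if (na == 0.0 || nb == 0.0) {
        return 0.0; // degenerate: at least one zero vector
    }
    return dot / (std::sqrt(na) * std::sqrt(nb));
}

int main() {
    // toy vectors standing in for two embeddings
    std::vector<float> e1 = {0.1f, 0.2f, 0.3f};
    std::vector<float> e2 = {0.1f, 0.2f, 0.25f};
    printf("cosine similarity = %f\n", cosine_similarity(e1, e2));
    return 0;
}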