mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)

commit cfa0750bc9
* add interface for float input
* fixed inpL shape and type
* add examples of input floats
* add test example for embd input
* fixed sampling
* add free for context
* add end condition for generating
* add examples for llava.py
* add README for llava.py
* add example of PandaGPT
* refactor the interface and fixed the styles
* add cmake build for embd-input
* Add MiniGPT-4 example
* change the order of the args of llama_eval_internal
* fix ci error
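The interface surface below is a sketch inferred purely from the calls the test program makes; the real declarations live in embd-input.h, and the exact names, return types, and struct layout there may differ.

// Sketch of the embd-input interface as used by the test program.
// Inferred from call sites only; return types and the MyModel layout
// are assumptions, not the actual contents of embd-input.h.
struct MyModel;  // assumed to expose ->ctx (llama_context *) and ->params.prompt

MyModel *    create_mymodel(int argc, char ** argv);      // parse CLI args, load the model
bool         eval_string(MyModel * m, const char * str);  // tokenize and evaluate a text segment
bool         eval_float(MyModel * m, float * emb, int n); // evaluate n raw embedding vectors
const char * sampling(MyModel * m);                       // sample the next token, returned as text
void         free_mymodel(MyModel * m);                   // release the context and model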
embd-input test program · 36 lines · 952 B · C++
#include "embd-input.h"

#include <stdio.h>
#include <stdlib.h>
#include <random>
#include <string.h>

int main(int argc, char** argv) {

    auto mymodel = create_mymodel(argc, argv);
    int N = 10;
    int max_tgt_len = 500;
    int n_embd = llama_n_embd(mymodel->ctx);

    // fill N pseudo-token embeddings with random floats to test evaluation
    float * data = new float[N * n_embd];
    std::default_random_engine e;
    std::uniform_real_distribution<float> u(0, 1);
    for (int i = 0; i < N * n_embd; i++) {
        data[i] = u(e);
    }

    // interleave text segments and raw float embeddings in the eval stream
    eval_string(mymodel, "user: what is the color of the flag of UN?");
    eval_float(mymodel, data, N);
    eval_string(mymodel, "assistant:");
    eval_string(mymodel, mymodel->params.prompt.c_str());

    // sample until the end-of-sequence token or the length cap
    const char * tmp;
    for (int i = 0; i < max_tgt_len; i++) {
        tmp = sampling(mymodel);
        if (strcmp(tmp, "</s>") == 0) break;
        printf("%s", tmp);
        fflush(stdout);
    }
    printf("\n");

    delete[] data;
    free_mymodel(mymodel);
    return 0;
}
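The random buffer above is only a smoke test of eval_float. A natural adaptation is to stream real embeddings through the same entry points, for example vectors exported by an external image encoder, in the spirit of the llava.py, MiniGPT-4, and PandaGPT examples this commit mentions. Below is a minimal sketch under that assumption; the file name embeddings.f32 and its layout (raw float32 values, a multiple of n_embd of them) are illustrative, not part of the commit.

#include "embd-input.h"
#include <stdio.h>
#include <string.h>
#include <vector>

int main(int argc, char ** argv) {
    auto mymodel = create_mymodel(argc, argv);
    int n_embd = llama_n_embd(mymodel->ctx);

    // hypothetical input: raw float32 values, N * n_embd of them,
    // e.g. exported by an external image encoder
    FILE * f = fopen("embeddings.f32", "rb");
    if (f == NULL) { fprintf(stderr, "missing embeddings.f32\n"); return 1; }
    fseek(f, 0, SEEK_END);
    long bytes = ftell(f);
    fseek(f, 0, SEEK_SET);
    int N = (int)(bytes / ((long)sizeof(float) * n_embd));
    std::vector<float> data((size_t)N * n_embd);
    size_t got = fread(data.data(), sizeof(float), data.size(), f);
    fclose(f);
    if (got != data.size()) { fprintf(stderr, "short read\n"); return 1; }

    // same interleaving pattern as the test program: text, floats, text
    eval_string(mymodel, "user:");
    eval_float(mymodel, data.data(), N);
    eval_string(mymodel, "assistant:");

    for (int i = 0; i < 500; i++) {
        const char * tmp = sampling(mymodel);
        if (strcmp(tmp, "</s>") == 0) break;
        printf("%s", tmp);
        fflush(stdout);
    }
    printf("\n");
    free_mymodel(mymodel);
    return 0;
}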