mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-04 09:32:00 +00:00 
			
		
		
		
	* add interface for float input * fixed inpL shape and type * add examples of input floats * add test example for embd input * fixed sampling * add free for context * fixed add end condition for generating * add examples for llava.py * add README for llava.py * add README for llava.py * add example of PandaGPT * refactor the interface and fixed the styles * add cmake build for embd-input * add cmake build for embd-input * Add MiniGPT-4 example * change the order of the args of llama_eval_internal * fix ci error
		
			
				
	
	
		
			31 lines
		
	
	
		
			664 B
		
	
	
	
		
			C++
		
	
	
	
	
	
			
		
		
	
	
			31 lines
		
	
	
		
			664 B
		
	
	
	
		
			C++
		
	
	
	
	
	
// embd-input: C-style API for driving a llama context with raw float
// embeddings as well as tokens/strings. Used by the embd-input examples
// (llava.py, PandaGPT, MiniGPT-4 bindings) over an FFI boundary.
//
// Guard renamed from _EMBD_INPUT_H_: identifiers starting with an
// underscore followed by an uppercase letter are reserved ([lex.name]).
#ifndef EMBD_INPUT_H
#define EMBD_INPUT_H 1

#include "common.h"
#include "llama.h"
#include "build-info.h"

#include <vector>  // used by eval_tokens below; do not rely on transitive includes

// NOTE(review): extern "C" gives these symbols unmangled linkage for FFI,
// but the block still uses C++-only constructs (std::vector parameter,
// default member initializer in MyModel), so this header cannot be
// consumed from a C translation unit despite the C linkage.
extern "C" {

typedef struct MyModel {
    llama_context* ctx;  // llama context created by create_mymodel(); released by free_mymodel()
    gpt_params params;   // generation/runtime parameters parsed from argv
    int n_past = 0;      // evaluation position within ctx; starts at 0 for a fresh model
} MyModel;

// Parse argc/argv, load the model and return a ready-to-use MyModel.
// Caller owns the result and must release it with free_mymodel().
struct MyModel* create_mymodel(int argc, char ** argv);

// Evaluate N rows of raw float embeddings (one embedding vector per token
// position). `model` is a MyModel* passed as void* for FFI convenience.
// Returns false on failure.
bool eval_float(void* model, float* input, int N);

// Evaluate a batch of already-tokenized input. Returns false on failure.
bool eval_tokens(void* model, std::vector<llama_token> tokens);

// Evaluate a single token id. Returns false on failure.
bool eval_id(struct MyModel* mymodel, int id);

// Tokenize `str` and evaluate the resulting tokens. Returns false on failure.
bool eval_string(struct MyModel* mymodel, const char* str);

// Sample the next piece of generated text.
// NOTE(review): ownership/lifetime of the returned string is not visible
// here — presumably library-owned and valid until the next call; confirm
// against the implementation before freeing or caching it.
const char* sampling(struct MyModel* mymodel);

// Sample and return the next token id.
llama_token sampling_id(struct MyModel* mymodel);

// Free the context and all resources owned by mymodel, then mymodel itself.
void free_mymodel(struct MyModel* mymodel);

}

#endif // EMBD_INPUT_H