Mirror of https://github.com/ggml-org/llama.cpp.git (last synced 2025-10-28 08:31:25 +00:00).
			
		
		
		
	embd-input : fix returning ptr to temporary
This commit is contained in:
		| @@ -210,9 +210,12 @@ llama_token sampling_id(struct MyModel* mymodel) { | |||||||
| const char * sampling(struct MyModel * mymodel) { | const char * sampling(struct MyModel * mymodel) { | ||||||
|     llama_context * ctx = mymodel->ctx; |     llama_context * ctx = mymodel->ctx; | ||||||
|     int id = sampling_id(mymodel); |     int id = sampling_id(mymodel); | ||||||
|     std::string ret; |     static std::string ret; | ||||||
|     if (id == llama_token_eos()) ret = "</s>"; |     if (id == llama_token_eos()) { | ||||||
|     else ret = llama_token_to_str(ctx, id); |         ret = "</s>"; | ||||||
|  |     } else { | ||||||
|  |         ret = llama_token_to_str(ctx, id); | ||||||
|  |     } | ||||||
|     eval_id(mymodel, id); |     eval_id(mymodel, id); | ||||||
|     return ret.c_str(); |     return ret.c_str(); | ||||||
| } | } | ||||||
|   | |||||||
| @@ -5,7 +5,6 @@ | |||||||
| #include "llama.h" | #include "llama.h" | ||||||
| #include "build-info.h" | #include "build-info.h" | ||||||
|  |  | ||||||
|  |  | ||||||
| extern "C" { | extern "C" { | ||||||
|  |  | ||||||
| typedef struct MyModel { | typedef struct MyModel { | ||||||
| @@ -14,7 +13,6 @@ typedef struct MyModel { | |||||||
|     int n_past = 0; |     int n_past = 0; | ||||||
| } MyModel; | } MyModel; | ||||||
|  |  | ||||||
|  |  | ||||||
| struct MyModel* create_mymodel(int argc, char ** argv); | struct MyModel* create_mymodel(int argc, char ** argv); | ||||||
|  |  | ||||||
| bool eval_float(void* model, float* input, int N); | bool eval_float(void* model, float* input, int N); | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user
Georgi Gerganov