Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-31 08:51:55 +00:00)

	llama : add llama_vocab, functions -> methods, naming (#11110)
				
					
				
* llama : functions -> methods (#11110)
* llama : add struct llama_vocab to the API (#11156) ggml-ci
* hparams : move vocab params to llama_vocab (#11159) ggml-ci
* vocab : more pimpl (#11165) ggml-ci
* vocab : minor tokenization optimizations (#11160) ggml-ci
  Co-authored-by: Diego Devesa <slarengh@gmail.com>
* lora : update API names (#11167) ggml-ci
* llama : update API names to use correct prefix (#11174)
* llama : update API names to use correct prefix ggml-ci
* cont ggml-ci
* cont ggml-ci
* minor [no ci]
* vocab : llama_vocab_add_[be]os -> llama_vocab_get_add_[be]os (#11174) ggml-ci
* vocab : llama_vocab_n_vocab -> llama_vocab_n_tokens (#11174) ggml-ci

---------

Co-authored-by: Diego Devesa <slarengh@gmail.com>
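As a rough illustration of the vocab renames listed above (a hedged sketch, not code from this commit; check the current llama.h for the exact signatures): vocabulary queries move from model-prefixed functions to an explicit llama_vocab handle obtained from the model.

#include <cstdio>
#include "llama.h"

// Sketch of caller-side changes under the new naming.
void print_vocab_info(const llama_model * model) {
    // New in this change: the vocab is a first-class API object.
    const llama_vocab * vocab = llama_model_get_vocab(model);

    // Previously llama_n_vocab(model); renamed to llama_vocab_n_tokens(vocab).
    const int32_t n_tokens = llama_vocab_n_tokens(vocab);

    // Previously llama_add_bos_token(model); now llama_vocab_get_add_bos(vocab).
    const bool add_bos = llama_vocab_get_add_bos(vocab);

    printf("n_tokens = %d, add_bos = %d\n", n_tokens, add_bos);
}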
@@ -1,73 +1,74 @@
 #pragma once

-#include "llama-impl.h"
-#include "llama-hparams.h"
+#include "llama.h"

 #include "ggml-cpp.h"

 #include <string>
 #include <unordered_map>
 #include <vector>

+// TODO: pimpl
+
 //
 // llama_adapter_cvec
 //

-// TODO: rename to llama_adapter_cvec
-struct llama_control_vector {
-    std::vector<ggml_context_ptr> ctxs;
-    std::vector<ggml_backend_buffer_ptr> bufs;
+struct llama_adapter_cvec {
+    struct ggml_tensor * tensor_for(int il) const;

-    std::vector<struct ggml_tensor *> tensors; // per layer
+    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int  il) const;

+    int32_t apply(
+            const llama_model & model,
+            const float * data,
+            size_t len,
+            int32_t n_embd,
+            int32_t il_start,
+            int32_t il_end);
+
+private:
+    bool init(const llama_model & model);
+
     int32_t layer_start = -1;
     int32_t layer_end   = -1;

-    struct ggml_tensor * tensor_for(int il) const;
+    std::vector<ggml_context_ptr> ctxs;
+    std::vector<ggml_backend_buffer_ptr> bufs;

-    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int  il) const;
+    std::vector<struct ggml_tensor *> tensors; // per layer
 };

-int32_t llama_control_vector_apply(
-        struct llama_control_vector & cvec,
-        const llama_model & model,
-        const float * data,
-        size_t len,
-        int32_t n_embd,
-        int32_t il_start,
-        int32_t il_end);
-
 //
 // llama_adapter_lora
 //

-// TODO: rename to llama_adapter_lora_weight
-struct llama_lora_weight {
+struct llama_adapter_lora_weight {
     struct ggml_tensor * a = nullptr;
     struct ggml_tensor * b = nullptr;

     // get actual scale based on rank and alpha
-    float get_scale(float alpha, float adapter_scale) {
+    float get_scale(float alpha, float adapter_scale) const {
         const float rank  = (float) b->ne[0];
         const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale;
         return scale;
     }

-    llama_lora_weight() = default;
-    llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {}
+    llama_adapter_lora_weight() = default;
+    llama_adapter_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {}
 };

-// TODO: rename to llama_adapter_lora
-struct llama_lora_adapter {
+struct llama_adapter_lora {
     // map tensor name to lora_a_b
-    std::unordered_map<std::string, struct llama_lora_weight> ab_map;
+    std::unordered_map<std::string, struct llama_adapter_lora_weight> ab_map;

     std::vector<ggml_context_ptr> ctxs;
     std::vector<ggml_backend_buffer_ptr> bufs;

     float alpha;

-    llama_lora_adapter() = default;
-    ~llama_lora_adapter() = default;
+    llama_adapter_lora() = default;
+    ~llama_adapter_lora() = default;

-    llama_lora_weight * get_weight(struct ggml_tensor * w);
+    llama_adapter_lora_weight * get_weight(struct ggml_tensor * w);
 };
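The get_scale() helper kept (and made const) in the diff above computes the effective LoRA scaling factor: when alpha is non-zero the adapter contribution is weighted by adapter_scale * alpha / rank, where rank is read from b->ne[0]; when alpha is zero the user-supplied adapter_scale is used as-is. A standalone sketch of the same arithmetic, with the rank passed explicitly instead of taken from a ggml tensor (example numbers are illustrative only):

#include <cstdio>

// Same formula as llama_adapter_lora_weight::get_scale() in the header above,
// minus the ggml_tensor dependency.
static float lora_scale(float alpha, float adapter_scale, float rank) {
    return alpha != 0.0f ? adapter_scale * alpha / rank : adapter_scale;
}

int main() {
    // alpha = 16, rank = 8, requested adapter scale = 1.0 -> 1.0 * 16 / 8 = 2.0
    printf("%.2f\n", lora_scale(16.0f, 1.0f, 8.0f));
    // alpha = 0 -> the adapter scale is used unchanged.
    printf("%.2f\n", lora_scale(0.0f, 1.0f, 8.0f));
    return 0;
}

The same commit also folds the free function llama_control_vector_apply() into the renamed struct as llama_adapter_cvec::apply(), part of the functions -> methods cleanup noted in the commit message.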
Georgi Gerganov