Mirror of https://github.com/ggml-org/llama.cpp.git, synced 2025-10-30 08:42:00 +00:00.
			
		
		
		
	llama : add llama_vocab, functions -> methods, naming (#11110)
				
					
				
			* llama : functions -> methods (#11110) * llama : add struct llama_vocab to the API (#11156) ggml-ci * hparams : move vocab params to llama_vocab (#11159) ggml-ci * vocab : more pimpl (#11165) ggml-ci * vocab : minor tokenization optimizations (#11160) ggml-ci Co-authored-by: Diego Devesa <slarengh@gmail.com> * lora : update API names (#11167) ggml-ci * llama : update API names to use correct prefix (#11174) * llama : update API names to use correct prefix ggml-ci * cont ggml-ci * cont ggml-ci * minor [no ci] * vocab : llama_vocab_add_[be]os -> llama_vocab_get_add_[be]os (#11174) ggml-ci * vocab : llama_vocab_n_vocab -> llama_vocab_n_tokens (#11174) ggml-ci --------- Co-authored-by: Diego Devesa <slarengh@gmail.com>
This commit is contained in:
		| @@ -8,7 +8,6 @@ | ||||
| #include <map> | ||||
| #include <vector> | ||||
| #include <string> | ||||
| #include <thread> | ||||
| #include <fstream> | ||||
|  | ||||
| static bool g_verbose = false; | ||||
| @@ -130,7 +129,7 @@ struct lora_merge_ctx { | ||||
|  | ||||
|     lora_merge_ctx( | ||||
|             std::string & base_fname, | ||||
|             std::vector<common_lora_adapter_info> & lora_files, | ||||
|             std::vector<common_adapter_lora_info> & lora_files, | ||||
|             std::string & outfile, | ||||
|             int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) { | ||||
|         fout.exceptions(std::ofstream::failbit); // fail fast on write errors | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Georgi Gerganov
					Georgi Gerganov