mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	adapt common
This commit is contained in:
		| @@ -1047,7 +1047,8 @@ struct common_init_result common_init_from_params(common_params & params) { | ||||
|         } | ||||
|  | ||||
|         if (llama_model_has_encoder(model)) { | ||||
|             llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size())); | ||||
|             llama_batch_ext_ptr batch(llama_batch_ext_init_from_text(tmp.data(), tmp.size(), 0, 0)); | ||||
|             llama_encode_ext(lctx, batch.get()); | ||||
|             llama_token decoder_start_token_id = llama_model_decoder_start_token(model); | ||||
|             if (decoder_start_token_id == LLAMA_TOKEN_NULL) { | ||||
|                 decoder_start_token_id = bos; | ||||
| @@ -1056,7 +1057,8 @@ struct common_init_result common_init_from_params(common_params & params) { | ||||
|             tmp.push_back(decoder_start_token_id); | ||||
|         } | ||||
|         if (llama_model_has_decoder(model)) { | ||||
|             llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch))); | ||||
|             llama_batch_ext_ptr batch(llama_batch_ext_init_from_text(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0)); | ||||
|             llama_decode_ext(lctx, batch.get()); | ||||
|         } | ||||
|         llama_kv_cache_clear(lctx); | ||||
|         llama_synchronize(lctx); | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Xuan Son Nguyen
					Xuan Son Nguyen