ggml-opencl, llama: using reserve() if count already known (#7272)
@@ -16162,6 +16162,7 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const
     }
 
     // make tensors
+    cvec.tensors.reserve(model.hparams.n_layer);
     cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0
     for (size_t il = 1; il < model.hparams.n_layer; il++) {
         struct ggml_context * ctx = ctx_map.at(model.buft_layer[il].buft);
@@ -16170,6 +16171,8 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const
     }
 
     // allocate tensors / buffers and zero
+    cvec.ctxs.reserve(ctx_map.size());
+    cvec.bufs.reserve(ctx_map.size());
     for (auto it : ctx_map) {
         ggml_backend_buffer_type_t buft = it.first;
         ggml_context * ctx = it.second;
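
The change applies a common C++ pattern: when the final element count is known before a push_back loop, calling std::vector::reserve() up front lets the vector allocate once instead of growing through repeated reallocations, while leaving the loop's behavior unchanged. Below is a minimal standalone sketch of that pattern under the same shape as the patched loop; the function and variable names (make_layer_ids, n_layer, ids) are illustrative only, not the actual llama.cpp identifiers.

#include <cstddef>
#include <vector>

// Sketch of the reserve()-before-push_back pattern used in the patch:
// pre-allocate capacity for the known count, then fill in a loop.
std::vector<int> make_layer_ids(std::size_t n_layer) {
    std::vector<int> ids;
    ids.reserve(n_layer);        // single allocation for the known count
    ids.push_back(0);            // placeholder for layer 0, mirroring the patch
    for (std::size_t il = 1; il < n_layer; il++) {
        ids.push_back(static_cast<int>(il));  // no reallocation inside the loop
    }
    return ids;
}

Note that reserve() only raises capacity(); size() still grows with each push_back, so the surrounding logic and the final contents are identical to the pre-patch code.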