MPI support, first cut
1 changed file: llama.cpp (137 lines changed)
--- a/llama.cpp
+++ b/llama.cpp
@@ -49,6 +49,8 @@
 #include <sstream>
 #include <numeric>
 
+#include <mpi.h>
+
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
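
Note that the include is unguarded in this first cut, so every build now needs MPI headers on the include path. A guarded form (an editorial sketch, not part of this commit) would keep non-MPI builds working:

    // Sketch: only pull in <mpi.h> when the MPI backend is enabled.
    #ifdef GGML_USE_MPI
    #include <mpi.h>
    #endif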
@@ -330,6 +332,9 @@ struct llama_context {
     ggml_metal_context * ctx_metal = NULL;
 #endif
 
+    int mpi_rank;
+    int mpi_size;
+
     int buf_last = 0;
     size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
 
@@ -864,6 +869,15 @@ void llama_init_backend(bool numa) {
     if (numa) {
         ggml_numa_init();
     }
+#ifdef GGML_USE_MPI
+    MPI_Init(NULL, NULL);
+#endif
 }
 
+void llama_finalize_backend() {
+#ifdef GGML_USE_MPI
+    MPI_Finalize();
+#endif
+}
+
 int64_t llama_time_us() {
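
Backend setup and teardown now bracket the MPI runtime, with llama_finalize_backend() introduced as the counterpart to llama_init_backend(). A minimal host-program sketch (hypothetical, using only APIs declared by this commit):

    #include "llama.h"

    int main() {
        llama_init_backend(false);  // calls MPI_Init(NULL, NULL) under GGML_USE_MPI
        // ... load a model, create a context, run evaluations ...
        llama_finalize_backend();   // calls MPI_Finalize() under GGML_USE_MPI
        return 0;
    }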
@@ -1307,7 +1321,16 @@ static bool llama_eval_internal(
     struct ggml_tensor * cur;
     struct ggml_tensor * inpL;
 
-    if (tokens) {
+    if (lctx.mpi_rank > 0) {
+#ifdef GGML_USE_MPI
+        inpL = ggml_recv_tensor(ctx0, NULL,
+            ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N),
+            lctx.mpi_rank-1);
+        ggml_set_name(inpL, "recv");
+#else
+        GGML_ASSERT(false);
+#endif
+    } else if (tokens) {
         struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
         ggml_set_name(embd, "embd");
         memcpy(embd->data, tokens, N*ggml_element_size(embd));
@@ -1341,7 +1364,9 @@ static bool llama_eval_internal(
     }
 #endif // GGML_USE_CUBLAS
 
-    for (int il = 0; il < n_layer; ++il) {
+    // EMM TODO distribute work more evenly - maybe rank=0 gets the smallest amount?
+    int slice_size = (n_layer + (lctx.mpi_size - 1)) / lctx.mpi_size;
+    for (int il = lctx.mpi_rank * slice_size; il < n_layer && il < (lctx.mpi_rank + 1) * slice_size; ++il) {
         offload_func_t offload_func = llama_nop;
 
 #ifdef GGML_USE_CUBLAS
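
The slice arithmetic is a ceiling division: each rank takes a contiguous block of at most ceil(n_layer / mpi_size) layers, and the loop's il < n_layer bound trims the last rank's share. For example, with n_layer = 32 and mpi_size = 3, slice_size = (32 + 2) / 3 = 11, so rank 0 runs layers 0-10, rank 1 runs 11-21, and rank 2 runs the remaining 22-31. The same partition as a standalone helper (hypothetical, not from the commit):

    // Compute the contiguous [il_begin, il_end) layer range for one rank.
    static void layer_range(int n_layer, int mpi_size, int mpi_rank,
                            int * il_begin, int * il_end) {
        const int slice_size = (n_layer + (mpi_size - 1)) / mpi_size; // ceil
        *il_begin = mpi_rank * slice_size;
        *il_end   = (mpi_rank + 1) * slice_size;
        if (*il_end > n_layer) *il_end = n_layer; // last rank may get fewer
    }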
@@ -1556,26 +1581,37 @@ static bool llama_eval_internal(
     // used at the end to optionally extract the embeddings
     struct ggml_tensor * embeddings = NULL;
 
-    // norm
-    {
-        cur = ggml_rms_norm(ctx0, inpL);
-        offload_func_nr(cur);
-        ggml_set_name(cur, "rms_norm_2");
+#ifdef GGML_USE_MPI
+    cur = ggml_send_tensor(ctx0, cur, (lctx.mpi_rank+1)%lctx.mpi_size);
+    ggml_set_name(cur, "send");
+#endif
+    if (lctx.mpi_rank == 0) {
+#ifdef GGML_USE_MPI
+        cur = ggml_recv_tensor(ctx0, cur,
+            ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N),
+            lctx.mpi_size-1);
+        ggml_set_name(cur, "recv");
+#endif
+        // norm
+        {
+            cur = ggml_rms_norm(ctx0, cur);
+            offload_func_nr(cur);
+            ggml_set_name(cur, "rms_norm_2");
 
-        // cur = cur*norm(broadcasted)
-        cur = ggml_mul(ctx0, cur, model.norm);
-        // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
-        ggml_set_name(cur, "result_norm");
+            // cur = cur*norm(broadcasted)
+            cur = ggml_mul(ctx0, cur, model.norm);
+            // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
+            ggml_set_name(cur, "result_norm");
 
-        embeddings = cur;
-    }
+            embeddings = cur;
+        }
 
-    // lm_head
-    cur = ggml_mul_mat(ctx0, model.output, cur);
-    ggml_set_name(cur, "result_output");
+        // lm_head
+        cur = ggml_mul_mat(ctx0, model.output, cur);
+        ggml_set_name(cur, "result_output");
+    }
 
     lctx.use_buf(ctx0, -1);
 
     // logits -> probs
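
ggml_send_tensor() and ggml_recv_tensor() are new ops added to ggml elsewhere in this commit (not shown in this file's diff). Because every rank sends to (mpi_rank+1) % mpi_size, the last rank's send wraps around to rank 0, which is why rank 0 receives from mpi_size-1 before running the final norm and lm_head. As a rough mental model only, assuming contiguous F32 tensor data, the two ops presumably reduce to point-to-point MPI transfers along that ring:

    #include <mpi.h>
    #include "ggml.h"

    // Hypothetical reduction to raw MPI; the real ops are ggml graph nodes
    // executed during graph compute, not immediate calls like these.
    static void sketch_send_tensor(const struct ggml_tensor * t, int dst) {
        MPI_Send(t->data, (int) ggml_nelements(t), MPI_FLOAT, dst, 0, MPI_COMM_WORLD);
    }

    static struct ggml_tensor * sketch_recv_tensor(struct ggml_tensor * dst, int src) {
        MPI_Recv(dst->data, (int) ggml_nelements(dst), MPI_FLOAT, src, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        return dst;
    }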
@@ -1632,26 +1668,28 @@ static bool llama_eval_internal(
     // update kv token count
     lctx.kv_self.n = n_past + N;
 
-    // extract logits
-    {
-        auto & logits_out = lctx.logits;
+    if (lctx.mpi_rank == 0) {
+        // extract logits
+        {
+            auto & logits_out = lctx.logits;
 
-        if (lctx.logits_all) {
-            logits_out.resize(n_vocab * N);
-            memcpy(logits_out.data(), (float *) ggml_get_data(cur), sizeof(float)*n_vocab*N);
-        } else {
-            // return result for just the last token
-            logits_out.resize(n_vocab);
-            memcpy(logits_out.data(), (float *) ggml_get_data(cur) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+            if (lctx.logits_all) {
+                logits_out.resize(n_vocab * N);
+                memcpy(logits_out.data(), (float *) ggml_get_data(cur), sizeof(float)*n_vocab*N);
+            } else {
+                // return result for just the last token
+                logits_out.resize(n_vocab);
+                memcpy(logits_out.data(), (float *) ggml_get_data(cur) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+            }
         }
-    }
 
-    // extract embeddings
-    if (!lctx.embedding.empty()) {
-        auto & embedding_out = lctx.embedding;
+        // extract embeddings
+        if (!lctx.embedding.empty()) {
+            auto & embedding_out = lctx.embedding;
 
-        embedding_out.resize(n_embd);
-        memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
+            embedding_out.resize(n_embd);
+            memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
+        }
     }
 
     if (mem_per_token == 0) {
@@ -2603,6 +2641,14 @@ struct llama_context * llama_new_context_with_model(
     ctx->rng = std::mt19937(params.seed);
     ctx->logits_all = params.logits_all;
 
+#ifdef GGML_USE_MPI
+    MPI_Comm_size(MPI_COMM_WORLD, &ctx->mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD, &ctx->mpi_rank);
+#else
+    ctx->mpi_size = 1;
+    ctx->mpi_rank = 0;
+#endif
+
     ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
 
     // reserve memory for context buffers
@@ -2675,6 +2721,16 @@ struct llama_context * llama_new_context_with_model(
     }
 #endif
 
+    if (ctx->mpi_rank > 0) {
+        // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
+        const std::vector<llama_token> tmp = { llama_token_bos(), };
+        while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0));
+#ifdef GGML_USE_MPI
+        MPI_Finalize();
+#endif
+        exit(1);
+    }
+
     return ctx;
 }
 
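
Since llama_eval() returns 0 on success, the condition !llama_eval(...) holds as long as evaluations keep succeeding: worker ranks spin here servicing eval after eval on rank 0's behalf, and fall out of the loop only when an eval fails, after which they finalize MPI and exit. The one-line loop, spelled out (a sketch with the same behavior):

    // Worker lifecycle on ranks > 0; the dummy arguments are overwritten
    // by the MPI_Bcast calls inside llama_eval (see the llama_eval hunk).
    const std::vector<llama_token> tmp = { llama_token_bos(), };
    for (;;) {
        if (llama_eval(ctx, tmp.data(), tmp.size(), 0, 0) != 0) {
            break; // an eval failed: rank 0 has stopped driving the pipeline
        }
    }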
@@ -3351,6 +3407,13 @@ int llama_eval(
         int   n_tokens,
         int   n_past,
         int   n_threads) {
+#ifdef GGML_USE_MPI
+    // Synchronize the worker node parameters with the root node
+    MPI_Barrier(MPI_COMM_WORLD);
+    MPI_Bcast(&n_past,    1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&n_tokens,  1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&n_threads, 1, MPI_INT, 0, MPI_COMM_WORLD);
+#endif
     if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
         fprintf(stderr, "%s: failed to eval\n", __func__);
         return 1;
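
This is the rendezvous that makes the workers' dummy-eval loop work: every rank enters llama_eval(), and the broadcasts overwrite the workers' dummy n_past/n_tokens/n_threads with rank 0's values, so all ranks run llama_eval_internal() with identical parameters. Note that the token ids themselves are never broadcast; workers compute on activations received via ggml_recv_tensor() instead. The per-eval handshake as an editorial timeline (comments only, not code from the commit):

    // rank 0 (driver)                     ranks > 0 (worker loop)
    // llama_eval(real args)               llama_eval(dummy args)
    //   MPI_Barrier  <----- sync ----->     MPI_Barrier
    //   MPI_Bcast(n_past)    ---------->    n_past    overwritten
    //   MPI_Bcast(n_tokens)  ---------->    n_tokens  overwritten
    //   MPI_Bcast(n_threads) ---------->    n_threads overwritten
    //   llama_eval_internal(...)            llama_eval_internal(...)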
@@ -3434,6 +3497,14 @@ int llama_n_embd(const struct llama_context * ctx) {
     return ctx->model.hparams.n_embd;
 }
 
+int llama_mpi_rank(const struct llama_context * ctx) {
+    return ctx->mpi_rank;
+}
+
+int llama_mpi_size(const struct llama_context * ctx) {
+    return ctx->mpi_size;
+}
+
 int llama_get_vocab(
         const struct llama_context * ctx,
         const char * * strings,
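
These accessors let application code stay rank-aware. Since only rank 0 extracts logits in this first cut (see the llama_eval_internal changes above), sampling and output should be gated on the root rank; a hedged sketch:

    // Only rank 0 holds valid logits in this commit, so gate on it.
    if (llama_mpi_rank(ctx) == 0) {
        const float * logits = llama_get_logits(ctx);
        // ... sample the next token from logits, print it, etc. ...
    }

Launching is then an ordinary MPI job, e.g. something like mpirun -np 2 ./main ... with one process per pipeline stage (an assumption about the intended workflow; no launch command appears in this diff).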