enc-dec : compose wip

ggml-ci
Georgi Gerganov
2025-02-24 15:16:45 +02:00
parent 9cd78f11a1
commit be58e30017
5 changed files with 1002 additions and 404 deletions

File diff suppressed because it is too large


@@ -30,8 +30,7 @@ public:
     virtual void synchronize() = 0;

     virtual const llama_model   & get_model()   const = 0;
-    virtual const llama_cparams & get_cparams() const = 0;

     virtual uint32_t n_ctx()         const = 0;
     virtual uint32_t n_ctx_per_seq() const = 0;
@@ -42,8 +41,6 @@ public:
     virtual uint32_t n_threads()       const = 0;
     virtual uint32_t n_threads_batch() const = 0;

-    virtual int32_t max_nodes() const = 0;
-
     // self-attention:
     // if the context does not have a KV cache, return nullptr
@@ -62,8 +59,6 @@ public:
     virtual float * get_embeddings_ith(int32_t i) = 0;
     virtual float * get_embeddings_seq(llama_seq_id seq_id) = 0;

-    virtual int64_t n_pos_per_token() const = 0; // vision
-
     virtual void attach_threadpool(
             ggml_threadpool_t threadpool,
             ggml_threadpool_t threadpool_batch) = 0;
@@ -190,8 +185,7 @@ protected:
     virtual void reserve();

 public:
     const llama_model & get_model() const override;
-    const llama_cparams & get_cparams() const override;

     uint32_t n_ctx()         const override;
     uint32_t n_ctx_per_seq() const override;
@@ -202,15 +196,9 @@ public:
     uint32_t n_threads()       const override;
     uint32_t n_threads_batch() const override;

-    int32_t max_nodes() const override;
-
-    // self-attention:
-    // if the context does not have a KV cache, return nullptr
     llama_kv_cache * get_kv_self() override;
     const llama_kv_cache * get_kv_self() const override;

-    // if the context does not have a KV cache, noop
     void kv_self_update() override;

     enum llama_pooling_type pooling_type() const override;
@@ -222,8 +210,6 @@ public:
     float * get_embeddings_ith(int32_t i) override;
     float * get_embeddings_seq(llama_seq_id seq_id) override;

-    int64_t n_pos_per_token() const override; // vision
-
     void attach_threadpool(
             ggml_threadpool_t threadpool,
             ggml_threadpool_t threadpool_batch) override;
@@ -261,6 +247,8 @@ protected:
     // input
     //

+    virtual int64_t n_pos_per_token() const; // vision
+
     // when the compute graph is built, it creates the input tensors that it needs
     // the contents of the input tensors are set by the input_set() function
@@ -299,6 +287,8 @@ protected:
     // graph
     //

+    virtual int32_t graph_max_nodes() const;
+
     // zero-out inputs and create the ctx_compute for the compute graph
     virtual ggml_cgraph * graph_init();
@@ -477,11 +467,11 @@ public:
             size_t n_token_count) override;

 protected:
-    virtual size_t state_get_data(llama_io_write_i & io);
-    virtual size_t state_set_data(llama_io_read_i  & io);
+    virtual size_t state_write_data(llama_io_write_i & io);
+    virtual size_t state_read_data (llama_io_read_i  & io);

-    virtual size_t state_seq_get_data(llama_io_write_i & io, llama_seq_id seq_id);
-    virtual size_t state_seq_set_data(llama_io_read_i  & io, llama_seq_id seq_id);
+    virtual size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id);
+    virtual size_t state_seq_read_data (llama_io_read_i  & io, llama_seq_id seq_id);

     //
     // members
@@ -625,39 +615,15 @@ protected:
             ggml_context * ctx0,
             ggml_cgraph  * gf) override;

-    // =======================================================
-    // === encoder-decoder ===
-    //
-    // TODO: this is temporary here, it will be moved
-    //
-
-    // whether we are computing encoder output or decoder output
-    bool is_encoding = false;
-
-    // output of the encoder part of the encoder-decoder models
-    std::vector<float> embd_enc;
-    std::vector<std::set<llama_seq_id>> seq_ids_enc;
-
-    struct ggml_tensor * inp_embd_enc;      // F32 [n_embd, n_outputs_enc]
-    struct ggml_tensor * inp_kq_mask_cross; // F32 [n_outputs_enc, n_batch]
-
-    ggml_tensor * build_inp_embd_enc(
-            ggml_context * ctx0) override;
-
-    ggml_tensor * build_inp_kq_mask_cross(
-            ggml_context * ctx0,
-                 int32_t   n_tokens) override;
-
-    // ======================================================
-
     //
     // state save/load
     //

-    size_t state_get_data(llama_io_write_i & io) override;
-    size_t state_set_data(llama_io_read_i  & io) override;
+    size_t state_write_data(llama_io_write_i & io) override;
+    size_t state_read_data (llama_io_read_i  & io) override;

-    size_t state_seq_get_data(llama_io_write_i & io, llama_seq_id seq_id) override;
-    size_t state_seq_set_data(llama_io_read_i  & io, llama_seq_id seq_id) override;
+    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id) override;
+    size_t state_seq_read_data (llama_io_read_i  & io, llama_seq_id seq_id) override;

 private:
     //
@@ -767,11 +733,11 @@ protected:
     // state save/load
     //

-    size_t state_get_data(llama_io_write_i & io) override;
-    size_t state_set_data(llama_io_read_i  & io) override;
+    size_t state_write_data(llama_io_write_i & io) override;
+    size_t state_read_data (llama_io_read_i  & io) override;

-    size_t state_seq_get_data(llama_io_write_i & io, llama_seq_id seq_id) override;
-    size_t state_seq_set_data(llama_io_read_i  & io, llama_seq_id seq_id) override;
+    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id) override;
+    size_t state_seq_read_data (llama_io_read_i  & io, llama_seq_id seq_id) override;

 private:
     //
@@ -782,21 +748,206 @@ private:
     llama_kv_cache_recurrent kv_self;
 };

+// TODO: tmp - need something better
+struct llama_cross {
+    int32_t n_outputs;
+    float * embd_enc;
+
+    std::vector<std::set<llama_seq_id>> seq_ids_enc;
+};
+
 class llama_context_enc : public llama_context_base {
 public:
     using llama_context_base::llama_context_base;
+
+    int encode(llama_batch & inp_batch) override;
+
+    llama_cross * cross = nullptr;
 };

-class llama_context_enc_dec : public llama_context_enc {
+class llama_context_dec : public llama_context_kv_self {
+public:
+    using llama_context_kv_self::llama_context_kv_self;
+
+protected:
+    void reserve() override;
+
+    //
+    // input
+    //
+
+    void input_set(const llama_ubatch & ubatch) override;
+
+private:
+    struct {
+        ggml_tensor * cross_embd;        // F32 [n_embd, n_outputs_enc]
+        ggml_tensor * cross_kq_mask;     // F32 [n_outputs_enc, n_batch]
+        ggml_tensor * cross_kq_mask_cnv; // F32 [n_outputs_enc, n_batch]
+    } inp;
+
+protected:
+    //
+    // graph
+    //
+
+    ggml_cgraph * graph_init() override;
+
+    ggml_tensor * build_inp_cross_embd(
+            ggml_context * ctx0) override;
+
+    void build_attn_inp(
+            ggml_context * ctx0,
+                 int32_t   n_tokens,
+                    bool   causal,
+                    bool   swa) override;
+
+    ggml_tensor * build_attn_cross(
+            ggml_context * ctx0,
+             ggml_cgraph * gf,
+             ggml_tensor * q_cur,
+             ggml_tensor * k_cur,
+             ggml_tensor * v_cur,
+             ggml_tensor * kq_b,
+                   float   kq_scale,
+                     int   il) override;
+
+public:
+    llama_cross * cross = nullptr;
+};
+
+class llama_context_enc_dec : public llama_context_i {
 public:
     llama_context_enc_dec(
             const llama_model & model,
                   llama_context_params params);

-    virtual ~llama_context_enc_dec();
+    ~llama_context_enc_dec();
+
+    void init() override;
+
+    void synchronize() override;
+
+    const llama_model & get_model() const override;
+
+    // TODO: the default implementation of these getters calls the corresponding getter of the enc or dec context
+    //       in the future, the public API in llama.h should allow to get references to the context that the user wants
+    //       this will allow to specify the desired context explicitly
+    //       for example:
+    //
+    //           // this can be an enc-dec context
+    //           llama_context_t ctx = llama_init_from_model(...);
+    //
+    //           ...
+    //
+    //           llama_context_t ctx_enc = llama_get_ctx_enc(ctx);
+    //           llama_set_embeddings(ctx_enc, true);
+    //
+    //           llama_context_t ctx_dec = llama_get_ctx_dec(ctx);
+    //           llama_set_causal_attn(ctx_dec, true);
+    //
+    uint32_t n_ctx()         const override;
+    uint32_t n_ctx_per_seq() const override;
+    uint32_t n_batch()       const override;
+    uint32_t n_ubatch()      const override;
+    uint32_t n_seq_max()     const override;
+
+    uint32_t n_threads()       const override;
+    uint32_t n_threads_batch() const override;
+
+    llama_kv_cache * get_kv_self() override;
+    const llama_kv_cache * get_kv_self() const override;
+
+    void kv_self_update() override;
+
+    enum llama_pooling_type pooling_type() const override;
+
+    float * get_logits()              override;
+    float * get_logits_ith(int32_t i) override;
+
+    float * get_embeddings()                        override;
+    float * get_embeddings_ith(int32_t i)           override;
+    float * get_embeddings_seq(llama_seq_id seq_id) override;
+
+    void attach_threadpool(
+            ggml_threadpool_t threadpool,
+            ggml_threadpool_t threadpool_batch) override;
+
+    void detach_threadpool() override;
+
+    void set_n_threads(int32_t n_threads, int32_t n_threads_batch) override;
+
+    void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data) override;
+
+    void set_embeddings (bool value) override;
+    void set_causal_attn(bool value) override;
+
+    void set_adapter_lora(
+            llama_adapter_lora * adapter,
+            float scale) override;
+
+    bool rm_adapter_lora(
+            llama_adapter_lora * adapter) override;
+
+    void clear_adapter_lora() override;
+
+    bool apply_adapter_cvec(
+            const float * data,
+                 size_t   len,
+                int32_t   n_embd,
+                int32_t   il_start,
+                int32_t   il_end) override;
+
+    int encode(llama_batch & inp_batch) override;
+    int decode(llama_batch & inp_batch) override;
+
+    //
+    // perf
+    //
+
+    llama_perf_context_data perf_get_data() const override;
+    void perf_reset() override;
+
+    //
+    // state save/load
+    //
+
+    size_t state_get_size() override;
+    size_t state_get_data(      uint8_t * dst, size_t size) override;
+    size_t state_set_data(const uint8_t * src, size_t size) override;
+
+    size_t state_seq_get_size(llama_seq_id seq_id) override;
+    size_t state_seq_get_data(llama_seq_id seq_id,       uint8_t * dst, size_t size) override;
+    size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size) override;
+
+    bool state_load_file(
+            const char * filepath,
+           llama_token * tokens_out,
+                size_t   n_token_capacity,
+                size_t * n_token_count_out) override;
+
+    bool state_save_file(
+            const char * filepath,
+     const llama_token * tokens,
+                size_t   n_token_count) override;
+
+    size_t state_seq_load_file(
+          llama_seq_id   seq_id,
+            const char * filepath,
+           llama_token * tokens_out,
+                size_t   n_token_capacity,
+                size_t * n_token_count_out) override;
+
+    size_t state_seq_save_file(
+          llama_seq_id   seq_id,
+            const char * filepath,
+     const llama_token * tokens,
+                size_t   n_token_count) override;

 private:
-    llama_context_kv_self ctx_dec;
+    std::unique_ptr<llama_context_enc> ctx_enc;
+    std::unique_ptr<llama_context_dec> ctx_dec;
+
+    llama_cross cross;
 };

 // For internal test use
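Note: the header above only declares the composition; the definitions land in the suppressed llama-context.cpp diff. As a hedged sketch of the flow these declarations imply (not the commit's actual code, and the child-context constructor arguments are assumed): the owning context points both children at its shared llama_cross member, encode() is forwarded to the encoder child, which fills the cross state, and decode() is forwarded to the decoder child, which cross-attends over it.

    // sketch only - the real definitions are in the suppressed llama-context.cpp diff
    llama_context_enc_dec::llama_context_enc_dec(
            const llama_model & model,
            llama_context_params params) :
        ctx_enc(std::make_unique<llama_context_enc>(model, params)),  // assumed ctor args
        ctx_dec(std::make_unique<llama_context_dec>(model, params)) { // assumed ctor args
        // both children share the same cross-attention state
        ctx_enc->cross = &cross;
        ctx_dec->cross = &cross;
    }

    int llama_context_enc_dec::encode(llama_batch & inp_batch) {
        return ctx_enc->encode(inp_batch); // fills cross.embd_enc and cross.seq_ids_enc
    }

    int llama_context_enc_dec::decode(llama_batch & inp_batch) {
        return ctx_dec->decode(inp_batch); // reads cross via build_inp_cross_embd()
    }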


@@ -26,7 +26,29 @@ ggml_tensor * llama_graph_i::build_attn(
     return nullptr;
 }

-ggml_tensor * llama_graph_i::build_inp_embd_enc(
+ggml_tensor * llama_graph_i::build_attn_cross(
+        ggml_context * ctx0,
+         ggml_cgraph * gf,
+         ggml_tensor * q_cur,
+         ggml_tensor * k_cur,
+         ggml_tensor * v_cur,
+         ggml_tensor * kq_b,
+               float   kq_scale,
+                 int   il) {
+    GGML_UNUSED(ctx0);
+    GGML_UNUSED(gf);
+    GGML_UNUSED(q_cur);
+    GGML_UNUSED(k_cur);
+    GGML_UNUSED(v_cur);
+    GGML_UNUSED(kq_b);
+    GGML_UNUSED(kq_scale);
+    GGML_UNUSED(il);
+
+    LLAMA_LOG_ERROR("%s: not implemented\n", __func__);
+
+    return nullptr;
+}
+
+ggml_tensor * llama_graph_i::build_inp_cross_embd(
         ggml_context * ctx0) {
     GGML_UNUSED(ctx0);

@@ -34,7 +56,7 @@ ggml_tensor * llama_graph_i::build_inp_embd_enc(
     return nullptr;
 }

-ggml_tensor * llama_graph_i::build_inp_kq_mask_cross(
+ggml_tensor * llama_graph_i::build_inp_cross_kq_mask(
         ggml_context * ctx0,
              int32_t   n_tokens) {
     GGML_UNUSED(ctx0);
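The stubs above follow a "not implemented by default" pattern: llama_graph_i ships error-logging fallbacks, and only context types that actually support cross-attention (llama_context_dec) override the hooks. A minimal stand-alone illustration of that pattern, with hypothetical names rather than llama.cpp's:

    #include <cstdio>

    struct graph_i {
        virtual ~graph_i() = default;

        // default: this context type does not support cross-attention
        virtual const char * build_attn_cross() {
            std::fprintf(stderr, "%s: not implemented\n", __func__);
            return nullptr;
        }
    };

    struct graph_dec : graph_i {
        const char * build_attn_cross() override { return "cross-attn node"; }
    };

    int main() {
        graph_dec dec;
        graph_i & base = dec;                          // dispatch through the interface
        std::printf("%s\n", base.build_attn_cross()); // prints "cross-attn node"

        graph_i plain;
        plain.build_attn_cross();                      // logs "not implemented", returns nullptr
        return 0;
    }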


@@ -114,10 +114,20 @@ public:
             float   kq_scale,
               int   il);

-    virtual ggml_tensor * build_inp_embd_enc(
+    virtual ggml_tensor * build_attn_cross(
+            ggml_context * ctx0,
+             ggml_cgraph * gf,
+             ggml_tensor * q_cur,
+             ggml_tensor * k_cur,
+             ggml_tensor * v_cur,
+             ggml_tensor * kq_b,
+                   float   kq_scale,
+                     int   il);
+
+    virtual ggml_tensor * build_inp_cross_embd(
             ggml_context * ctx0);

-    virtual ggml_tensor * build_inp_kq_mask_cross(
+    virtual ggml_tensor * build_inp_cross_kq_mask(
             ggml_context * ctx0,
                  int32_t   n_tokens);


@@ -3964,16 +3964,16 @@ struct llm_build_context {
     }

     // TODO: tmp
-    struct ggml_tensor * build_inp_embd_enc() {
-        ggml_tensor * cur = lgf->build_inp_embd_enc(ctx0);
+    struct ggml_tensor * build_inp_cross_embd() {
+        ggml_tensor * cur = lgf->build_inp_cross_embd(ctx0);
         cb(cur, "embd_enc", -1);
         return cur;
     }

     // TODO: tmp
-    struct ggml_tensor * build_inp_kq_mask_cross() {
-        ggml_tensor * cur = lgf->build_inp_kq_mask_cross(ctx0, n_tokens);
+    struct ggml_tensor * build_inp_cross_kq_mask() {
+        ggml_tensor * cur = lgf->build_inp_cross_kq_mask(ctx0, n_tokens);
         cb(cur, "KQ_mask_cross", -1);
         return cur;
@@ -4294,6 +4294,42 @@ struct llm_build_context {
         return cur;
     }

+    struct ggml_tensor * build_attn_cross(
+            struct ggml_cgraph * gf,
+            struct ggml_tensor * wo,
+            struct ggml_tensor * wo_b,
+            struct ggml_tensor * q_cur,
+            struct ggml_tensor * k_cur,
+            struct ggml_tensor * v_cur,
+                       int32_t   n_tokens, // TODO: remove
+                         float   kq_scale,
+                           int   il) {
+        GGML_UNUSED(n_tokens);
+
+        // these nodes are added to the graph together so that they are not reordered
+        // by doing so, the number of splits in the graph is reduced
+        ggml_build_forward_expand(gf, q_cur);
+        ggml_build_forward_expand(gf, k_cur);
+        ggml_build_forward_expand(gf, v_cur);
+
+        ggml_tensor * cur = lgf->build_attn_cross(ctx0, gf, q_cur, k_cur, v_cur, nullptr, kq_scale, il);
+        cb(cur, "kqv_out", il);
+
+        if (wo) {
+            cur = lgf->build_lora_mm(ctx0, wo, cur);
+        }
+
+        if (wo_b) {
+            //cb(cur, "kqv_wo", il);
+        }
+
+        if (wo_b) {
+            cur = ggml_add(ctx0, cur, wo_b);
+        }
+
+        return cur;
+    }
+
     struct ggml_tensor * build_attn_with_kq_b(
             struct ggml_cgraph * gf,
             struct ggml_tensor * wo,
@@ -9762,209 +9798,173 @@ struct llm_build_context {
         ggml_build_forward_expand(gf, cur);
     }

-    //void build_t5_dec(ggml_cgraph * gf) {
-    //    const int64_t n_embd_head = hparams.n_embd_head_v;
-    //    const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
-    //    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

-    //    struct ggml_tensor * cur;
-    //    struct ggml_tensor * inpL;

-    //    inpL = build_inp_embd(model.tok_embd);

-    //    GGML_ASSERT(!lctx.is_encoding);
-    //    GGML_ASSERT(n_outputs_enc > 0 && "call llama_encode() first");

-    //    struct ggml_tensor * embd_enc       = build_inp_embd_enc();
-    //    struct ggml_tensor * pos_bucket_dec = build_pos_bucket(true);

-    //    struct ggml_tensor * KQ_mask_dec   = build_inp_kq_mask();
-    //    struct ggml_tensor * KQ_mask_cross = build_inp_kq_mask_cross();

-    //    for (int il = 0; il < n_layer; ++il) {
-    //        struct ggml_tensor * inpSA = inpL;

-    //        // norm
-    //        cur = build_norm(inpL,
-    //                model.layers[il].attn_norm, NULL,
-    //                LLM_NORM_RMS, il);
-    //        cb(cur, "attn_norm", il);

-    //        // self-attention
-    //        {
-    //            struct ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
-    //            cb(Qcur, "Qcur", il);

-    //            struct ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
-    //            cb(Kcur, "Kcur", il);

-    //            struct ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
-    //            cb(Vcur, "Vcur", il);

-    //            build_kv_store(gf, Kcur, Vcur, il);

-    //            struct ggml_tensor * k =
-    //                ggml_view_3d(ctx0, kv_self.k_l[il],
-    //                        n_embd_head_k, n_kv, n_head_kv,
-    //                        ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
-    //                        ggml_row_size(kv_self.k_l[il]->type, n_embd_head_k),
-    //                        0);
-    //            cb(k, "k", il);

-    //            struct ggml_tensor * v =
-    //                ggml_view_3d(ctx0, kv_self.v_l[il],
-    //                        n_kv, n_embd_head_v, n_head_kv,
-    //                        ggml_element_size(kv_self.v_l[il])*n_ctx,
-    //                        ggml_element_size(kv_self.v_l[il])*n_ctx*n_embd_head_v,
-    //                        0);
-    //            cb(v, "v", il);

-    //            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);

-    //            struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);

-    //            struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
-    //            cb(kq, "kq", il);

-    //            struct ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b ? model.layers[il].attn_rel_b : model.layers[0].attn_rel_b;
-    //            struct ggml_tensor * pos_bias = build_pos_bias(pos_bucket_dec, attn_rel_b);
-    //            struct ggml_tensor * kq_b = ggml_add(ctx0, kq, pos_bias);
-    //            cb(kq_b, "kq_b", il);

-    //            kq = ggml_soft_max_ext(ctx0, kq_b, KQ_mask_dec, 1.0f, hparams.f_max_alibi_bias);
-    //            cb(kq, "kq_soft_max_ext", il);

-    //            struct ggml_tensor * kqv = ggml_mul_mat(ctx0, v, kq);
-    //            cb(kqv, "kqv", il);

-    //            struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
-    //            cb(kqv_merged, "kqv_merged", il);

-    //            cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
-    //            cb(cur, "kqv_merged_cont", il);

-    //            ggml_build_forward_expand(gf, cur);

-    //            cur = build_lora_mm(model.layers[il].wo, cur);
-    //            cb(cur, "kqv_out", il);
-    //        }

-    //        cur = ggml_add(ctx0, cur, inpSA);
-    //        cb(cur, "cross_inp", il);

-    //        struct ggml_tensor * inpCA = cur;

-    //        // norm
-    //        cur = build_norm(cur,
-    //                model.layers[il].attn_norm_cross, NULL,
-    //                LLM_NORM_RMS, il);
-    //        cb(cur, "attn_norm_cross", il);

-    //        // cross-attention
-    //        {
-    //            struct ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq_cross, cur);
-    //            cb(Qcur, "Qcur", il);

-    //            struct ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk_cross, embd_enc);
-    //            cb(Kcur, "Kcur", il);

-    //            struct ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv_cross, embd_enc);
-    //            cb(Vcur, "Vcur", il);

-    //            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
-    //            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_outputs_enc);

-    //            struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
-    //            struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));

-    //            struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
-    //            cb(kq, "kq", il);

-    //            kq = ggml_soft_max_ext(ctx0, kq, KQ_mask_cross, 1.0f, hparams.f_max_alibi_bias);
-    //            cb(kq, "kq_soft_max_ext", il);

-    //            struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_outputs_enc)));
-    //            cb(v, "v", il);

-    //            struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_outputs_enc, n_embd_head, n_head_kv), kq);
-    //            cb(kqv, "kqv", il);

-    //            struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
-    //            cb(kqv_merged, "kqv_merged", il);

-    //            cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
-    //            cb(cur, "kqv_merged_cont", il);

-    //            ggml_build_forward_expand(gf, cur);

-    //            cur = build_lora_mm(model.layers[il].wo_cross, cur);
-    //            cb(cur, "kqv_out", il);
-    //        }

-    //        if (il == n_layer - 1) {
-    //            // skip computing output for unused tokens
-    //            struct ggml_tensor * inp_out_ids = build_inp_out_ids();
-    //            cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
-    //            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
-    //            inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids);
-    //        }

-    //        struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpCA);
-    //        cb(ffn_inp, "ffn_inp", il);

-    //        // feed-forward network
-    //        {
-    //            cur = build_norm(ffn_inp,
-    //                    model.layers[il].ffn_norm, NULL,
-    //                    LLM_NORM_RMS, il);
-    //            cb(cur, "ffn_norm", il);

-    //            // T5 uses relu, flan-T5 uses gelu-gated
-    //            cur = build_ffn(cur,
-    //                    model.layers[il].ffn_up,   NULL, NULL,
-    //                    model.layers[il].ffn_gate, NULL, NULL,
-    //                    model.layers[il].ffn_down, NULL, NULL,
-    //                    NULL,
-    //                    model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
-    //                    model.layers[il].ffn_gate_enc ? LLM_FFN_PAR  : LLM_FFN_SEQ,
-    //                    il);
-    //            cb(cur, "ffn_out", il);
-    //        }

-    //        cur = ggml_add(ctx0, cur, ffn_inp);
-    //        cb(cur, "ffn_out", il);

-    //        ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-    //        if (layer_dir != nullptr) {
-    //            cur = ggml_add(ctx0, cur, layer_dir);
-    //        }
-    //        cb(cur, "l_out", il);

-    //        // input for next layer
-    //        inpL = cur;
-    //    }

-    //    cur = inpL;
-    //    cb(cur, "result_embd", -1);

-    //    cur = build_norm(cur,
-    //            model.output_norm, NULL,
-    //            LLM_NORM_RMS, -1);
-    //    cb(cur, "result_norm", -1);
-    //    res.t_embd = cur;

-    //    // lm_head
-    //    cur = build_lora_mm(model.output, cur);
-    //    cb(cur, "result_output", -1);
-    //    res.t_logits = cur;

-    //    ggml_build_forward_expand(gf, cur);

-    //    return gf;
-    //}
+    void build_t5_dec(ggml_cgraph * gf) {
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        //const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;

+        inpL = build_inp_embd(model.tok_embd);

+        struct ggml_tensor * embd_enc       = build_inp_cross_embd();
+        struct ggml_tensor * pos_bucket_dec = build_pos_bucket();

+        const int64_t n_outputs_enc = embd_enc->ne[1];

+        lgf->build_attn_inp(ctx0, n_tokens, true, false);

+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * inpSA = inpL;

+            // norm
+            cur = build_norm(inpL,
+                    model.layers[il].attn_norm, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "attn_norm", il);

+            // self-attention
+            {
+                struct ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);

+                struct ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);

+                struct ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);

+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
+                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

+                struct ggml_tensor * attn_rel_b = model.layers[il].attn_rel_b ? model.layers[il].attn_rel_b : model.layers[0].attn_rel_b;
+                struct ggml_tensor * kq_b = build_pos_bias(pos_bucket_dec, attn_rel_b);

+                cur = build_attn_with_kq_b(gf,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Qcur, Kcur, Vcur, kq_b, n_tokens, 1.0f, il);
+                cb(cur, "kqv_out", il);
+            }

+            cur = ggml_add(ctx0, cur, inpSA);
+            cb(cur, "cross_inp", il);

+            struct ggml_tensor * inpCA = cur;

+            // norm
+            cur = build_norm(cur,
+                    model.layers[il].attn_norm_cross, NULL,
+                    LLM_NORM_RMS, il);
+            cb(cur, "attn_norm_cross", il);

+            // cross-attention
+            {
+                struct ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq_cross, cur);
+                cb(Qcur, "Qcur", il);

+                struct ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk_cross, embd_enc);
+                cb(Kcur, "Kcur", il);

+                struct ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv_cross, embd_enc);
+                cb(Vcur, "Vcur", il);

+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
+                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_outputs_enc);
+                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_outputs_enc);

+                cur = build_attn_cross(gf,
+                        model.layers[il].wo_cross, nullptr,
+                        Qcur, Kcur, Vcur, n_tokens, 1.0f, il);
+                cb(cur, "kqv_out", il);

+                //struct ggml_tensor * q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
+                //struct ggml_tensor * k = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));

+                //struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
+                //cb(kq, "kq", il);

+                //kq = ggml_soft_max_ext(ctx0, kq, KQ_mask_cross, 1.0f, hparams.f_max_alibi_bias);
+                //cb(kq, "kq_soft_max_ext", il);

+                //struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_outputs_enc)));
+                //cb(v, "v", il);

+                //struct ggml_tensor * kqv = ggml_mul_mat(ctx0, ggml_reshape_3d(ctx0, v, n_outputs_enc, n_embd_head, n_head_kv), kq);
+                //cb(kqv, "kqv", il);

+                //struct ggml_tensor * kqv_merged = ggml_permute(ctx0, kqv, 0, 2, 1, 3);
+                //cb(kqv_merged, "kqv_merged", il);

+                //cur = ggml_cont_2d(ctx0, kqv_merged, n_embd_gqa, n_tokens);
+                //cb(cur, "kqv_merged_cont", il);

+                //ggml_build_forward_expand(gf, cur);

+                //cur = build_lora_mm(model.layers[il].wo_cross, cur);
+                //cb(cur, "kqv_out", il);
+            }

+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
+                cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
+                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+                inpCA = ggml_get_rows(ctx0, inpCA, inp_out_ids);
+            }

+            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpCA);
+            cb(ffn_inp, "ffn_inp", il);

+            // feed-forward network
+            {
+                cur = build_norm(ffn_inp,
+                        model.layers[il].ffn_norm, NULL,
+                        LLM_NORM_RMS, il);
+                cb(cur, "ffn_norm", il);

+                // T5 uses relu, flan-T5 uses gelu-gated
+                cur = build_ffn(cur,
+                        model.layers[il].ffn_up,   NULL, NULL,
+                        model.layers[il].ffn_gate, NULL, NULL,
+                        model.layers[il].ffn_down, NULL, NULL,
+                        NULL,
+                        model.layers[il].ffn_gate_enc ? LLM_FFN_GELU : LLM_FFN_RELU,
+                        model.layers[il].ffn_gate_enc ? LLM_FFN_PAR  : LLM_FFN_SEQ,
+                        il);
+                cb(cur, "ffn_out", il);
+            }

+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cb(cur, "ffn_out", il);

+            cur = lgf->build_cvec(ctx0, cur, il);
+            cb(cur, "l_out", il);

+            // input for next layer
+            inpL = cur;
+        }

+        cur = inpL;
+        cb(cur, "result_embd", -1);

+        cur = build_norm(cur,
+                model.output_norm, NULL,
+                LLM_NORM_RMS, -1);
+        cb(cur, "result_norm", -1);
+        res.t_embd = cur;

+        // lm_head
+        cur = build_lora_mm(model.output, cur);
+        cb(cur, "result_output", -1);
+        res.t_logits = cur;

+        ggml_build_forward_expand(gf, cur);
+    }

     void build_jais(ggml_cgraph * gf) {
         const int64_t n_embd_head = hparams.n_embd_head_v;
@@ -11119,7 +11119,7 @@ llama_graph_result llama_model::build_graph(
                 llm.build_t5_enc(gf);
                 break;
             case LLAMA_GRAPH_TYPE_DECODER:
-                //llm.build_t5_dec(gf);
+                llm.build_t5_dec(gf);
                 break;
             default:
                 GGML_ABORT("invalid graph type");
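With build_t5_dec() enabled above, a T5-style model runs its encoder once and then cross-attends to the cached encoder output on every decode step. A hedged usage sketch against the existing public API (llama_encode(), llama_decode(), llama_batch_get_one(), llama_model_decoder_start_token(), llama_get_logits_ith() are all in llama.h; the greedy sampling here is only for illustration):

    #include "llama.h"

    #include <algorithm>
    #include <vector>

    // hedged sketch: greedy enc-dec generation, minimal error handling
    static void t5_generate(llama_context * ctx, const llama_model * model,
                            std::vector<llama_token> prompt, int n_predict) {
        // encoder pass: builds the encoder graph and fills the cross-attention state
        llama_batch batch = llama_batch_get_one(prompt.data(), (int32_t) prompt.size());
        if (llama_encode(ctx, batch) != 0) {
            return;
        }

        const llama_vocab * vocab = llama_model_get_vocab(model);
        const int n_vocab = llama_vocab_n_tokens(vocab);

        // decoder passes: each step cross-attends over the encoder output
        llama_token tok = llama_model_decoder_start_token(model);
        for (int i = 0; i < n_predict; ++i) {
            llama_batch dbatch = llama_batch_get_one(&tok, 1);
            if (llama_decode(ctx, dbatch) != 0) { // LLAMA_GRAPH_TYPE_DECODER -> build_t5_dec()
                break;
            }
            const float * logits = llama_get_logits_ith(ctx, 0);
            tok = (llama_token) (std::max_element(logits, logits + n_vocab) - logits); // greedy pick
            if (tok == llama_vocab_eos(vocab)) {
                break;
            }
        }
    }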