mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-10-30 08:42:00 +00:00)
	llama : initial Mamba-2 support (#9126)
* llama : initial Mamba-2 support
* ggml : SIMD ggml_ssm_scan for Mamba-2 (a sketch of the scan recurrence follows this list)
* ggml : improve ggml_mul speed when masking recurrent states
* llama : support running Mamba-Codestral-7B-v0.1
* llama : fix Mamba-2 conv state saving
* ggml : make the ggml_mul fast broadcast path more consistently formatted
* llama : remove unused variable
* llama : add missing break
* convert_hf : prefer SentencePiece tokenizer for Mamba-2 when present

  The tokenizer.json of Mamba-Codestral-7B-v0.1 otherwise requires workarounds to work correctly.

* llama : avoid redundant state copy for Mamba 1 and 2
* metal : attempt to adapt SSM_SCAN for Mamba-2
* metal : fix SSM_SCAN pipeline scope
* metal : use log and exp instead of log1pf and expf in SSM_SCAN
* metal : remove unused arguments for SSM_SCAN

  The max index is 31, so trimming the arguments is necessary.

* metal : add back n_seqs to SSM_SCAN args

  Whoops, this is needed for the offset in the concatenated output.

* metal : fix SSM_SCAN state head offset
* metal : fix wrong number of tokens per sequence in SSM_SCAN
* ggml : remove unused fast broadcast path in GGML_MUL

  This was initially added because states were masked with ggml_mul, but this is no longer done, so the "optimisation" is no longer necessary, or at least not worth the additional code complexity.

* ggml : avoid multiply by D in GGML_OP_SSM_SCAN

  This makes the weight buft detection in src/llama.cpp simpler.

* convert : transpose Mamba-2 A, D and reshape SSM_NORM

  This breaks existing conversions of Mamba-2 models to avoid some reshapes. Not sure if it's a good idea, but it makes the graph slightly cleaner.

* llama : more appropriate SSM_SCAN and SSM_CONV buft support checks
* convert : fix flake8 lint
* metal : fix confusion between ; and ,
* metal : add missing args for nb references in ssm_scan_f32_group
* metal : single-user mamba2 inference works
* kv-cache : remove const_cast when setting inputs for s_copy

  This also fixes multi-user inference for recurrent models by using cell_id instead of i as the kv cell index when populating s_copy.

* convert : avoid AutoConfig for Mamba and Mamba2 hparams
* kv-cache : allow context shift for recurrent models
* graph : fix recurrent state copies when avoiding copies

  Works, but using lambda functions might not be that clean.

* ggml : fix mamba2 ssm scan when compiled with SVE
* ggml-cpu : reorder SVE FMA for consistency with other SIMD arches
* cuda : implement ssm scan for Mamba2

  There is still room for improvement, but it works!

* cuda : adapt Mamba1 ssm scan to shape changes from Mamba2
* mamba : fix mismatched new and delete size for llm_build_mamba

  Subclasses of llm_graph_context cannot have extra fields, because the called destructor is not the one from the subclass. This would otherwise cause problems when running Mamba-(1|2) inference when compiled with -DGGML_SANITIZE_ADDRESS=ON.

* cuda : graceful fallback for Mamba-1 models with weird embd size
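For readers new to the op this change extends, the sketch below shows the Mamba-2 (SSD) recurrence that GGML_OP_SSM_SCAN implements for this architecture: per head, the state decays by exp(dt*A) and accumulates dt*B*x, and the output is the dot product with C. This is a minimal, hypothetical illustration rather than the actual ggml kernel; the layout (heads, head dimension, grouped B/C) and the assumption that dt is already positive (e.g. after softplus) are assumptions, and the D skip connection is applied outside the scan, matching the change listed above.

// Minimal sketch of one token's Mamba-2 state update (illustrative only,
// not the ggml kernel). Assumed layout:
//   x   : [n_head][head_dim]          input for this token
//   dt  : [n_head]                    per-head time step, assumed already positive
//   A   : [n_head]                    per-head scalar decay (negative)
//   B, C: [n_group][d_state]          shared across the heads of each group
//   s   : [n_head][head_dim][d_state] recurrent state, updated in place
//   y   : [n_head][head_dim]          output for this token
#include <cmath>

void ssm_scan_step_mamba2(int n_head, int head_dim, int d_state, int n_group,
                          const float * x, const float * dt, const float * A,
                          const float * B, const float * C,
                          float * s, float * y) {
    const int heads_per_group = n_head / n_group;
    for (int h = 0; h < n_head; ++h) {
        const int   g  = h / heads_per_group;     // which B/C group this head reads
        const float dA = std::exp(dt[h] * A[h]);  // scalar state decay for this head
        for (int p = 0; p < head_dim; ++p) {
            const float dtx = dt[h] * x[h*head_dim + p];
            float acc = 0.0f;
            for (int n = 0; n < d_state; ++n) {
                float & state = s[(h*head_dim + p)*d_state + n];
                state = state*dA + dtx*B[g*d_state + n];  // h_t = exp(dt*A)*h_{t-1} + dt*B*x_t
                acc  += state*C[g*d_state + n];           // y_t = C . h_t
            }
            // The D skip connection (y += D[h]*x) is applied outside the scan,
            // per "ggml : avoid multiply by D in GGML_OP_SSM_SCAN" above.
            y[h*head_dim + p] = acc;
        }
    }
}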
@@ -45,6 +45,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_GEMMA3N,          "gemma3n"          },
     { LLM_ARCH_STARCODER2,       "starcoder2"       },
     { LLM_ARCH_MAMBA,            "mamba"            },
+    { LLM_ARCH_MAMBA2,           "mamba2"           },
     { LLM_ARCH_XVERSE,           "xverse"           },
     { LLM_ARCH_COMMAND_R,        "command-r"        },
     { LLM_ARCH_COHERE2,          "cohere2"          },
@@ -170,6 +171,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_SSM_INNER_SIZE,     "%s.ssm.inner_size"     },
     { LLM_KV_SSM_STATE_SIZE,     "%s.ssm.state_size"     },
     { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },
+    { LLM_KV_SSM_GROUP_COUNT,    "%s.ssm.group_count"    },
     { LLM_KV_SSM_DT_B_C_RMS,     "%s.ssm.dt_b_c_rms"     },
 
     { LLM_KV_WKV_HEAD_SIZE, "%s.wkv.head_size" },
@@ -1004,6 +1006,22 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
             { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
         },
     },
+    {
+        LLM_ARCH_MAMBA2,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_OUTPUT,          "output" },
+            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+            { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
+            { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
+            { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
+            { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
+            { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
+            { LLM_TENSOR_SSM_NORM,        "blk.%d.ssm_norm" },
+            { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
+        },
+    },
     {
         LLM_ARCH_XVERSE,
         {
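As an aside, the strings in LLM_KV_NAMES and LLM_TENSOR_NAMES above are printf-style patterns: "%s" is replaced with the architecture name and "%d" with the layer index when a GGUF file is read. The snippet below only illustrates that expansion; the format_name helper and the main function are hypothetical, not llama.cpp's actual loading code.

// Illustration of how the name patterns above expand for a mamba2 model.
// format_name() is a hypothetical helper, not part of llama.cpp.
#include <cstdio>
#include <string>

static std::string format_name(const char * pattern, int layer) {
    char buf[128];
    std::snprintf(buf, sizeof(buf), pattern, layer);
    return std::string(buf);
}

int main() {
    // per-layer tensor name: "blk.%d.ssm_norm" -> "blk.0.ssm_norm"
    std::printf("%s\n", format_name("blk.%d.ssm_norm", 0).c_str());

    // metadata key: "%s.ssm.group_count" -> "mamba2.ssm.group_count"
    char key[64];
    std::snprintf(key, sizeof(key), "%s.ssm.group_count", "mamba2");
    std::printf("%s\n", key);
    return 0;
}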
@@ -1761,6 +1779,7 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
     {LLM_TENSOR_SSM_CONV1D,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
     {LLM_TENSOR_SSM_A,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}},
     {LLM_TENSOR_SSM_D,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
+    {LLM_TENSOR_SSM_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_TIME_MIX_LERP_X,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_TIME_MIX_LN,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
     {LLM_TENSOR_CHANNEL_MIX_LERP_K,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
@@ -1894,6 +1913,7 @@ const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) {
 bool llm_arch_is_recurrent(const llm_arch & arch) {
     switch (arch) {
         case LLM_ARCH_MAMBA:
+        case LLM_ARCH_MAMBA2:
         case LLM_ARCH_RWKV6:
         case LLM_ARCH_RWKV6QWEN2:
         case LLM_ARCH_RWKV7:
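Adding LLM_ARCH_MAMBA2 to llm_arch_is_recurrent is what routes Mamba-2 onto the recurrent-state path (fixed-size per-sequence states rather than a growing attention KV cache). The sketch below only illustrates that kind of gating, assuming the llm_arch and llm_arch_is_recurrent declarations from this file's header; the enum and function around the call are hypothetical, not the real call sites.

// Hypothetical gating on the predicate added above; assumes the llm_arch
// type and llm_arch_is_recurrent() declaration from llama.cpp's headers.
enum class cache_kind { attention_kv, recurrent_state };

static cache_kind pick_cache_kind(llm_arch arch) {
    // Mamba-2 joins Mamba-1 and the RWKV family on the recurrent path.
    return llm_arch_is_recurrent(arch) ? cache_kind::recurrent_state
                                       : cache_kind::attention_kv;
}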
compilade