Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-06 09:46:50 +00:00)
moe cleanups
@@ -120,14 +120,12 @@ struct llama_hparams {
    uint32_t attn_head_dim     = 0;
    bool     mamba_rms_norm    = false;
    double   rope_theta        = 10000.0;
    uint32_t vocab_size        = 0;
    uint32_t intermediate_size = 0;
    float    mamba_expand      = 0.0f;
    bool     ssm_rms_norm      = false;
    bool     ssm_conv_bias     = false;
    bool     ssm_proj_bias     = false;
    uint32_t chunk_size        = 0;

    // for hybrid state space models
    std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr;
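The recurrent_layer_arr flag array is what lets a hybrid state space model mix recurrent (Mamba/SSM) layers and attention layers in a single stack. Below is a minimal sketch of how a graph-build loop might consult it, assuming a hypothetical per-layer dispatch; build_ssm_layer and build_attn_layer are illustrative names, not the actual llama.cpp API:

    #include <array>
    #include <cstdint>

    constexpr size_t LLAMA_MAX_LAYERS = 512; // assumed bound, matching the array size above

    struct llama_hparams {
        uint32_t n_layer = 0;
        // true  -> layer il uses the recurrent (SSM) mixer
        // false -> layer il uses standard attention
        std::array<bool, LLAMA_MAX_LAYERS> recurrent_layer_arr = {};
    };

    void build_layers(const llama_hparams & hparams) {
        for (uint32_t il = 0; il < hparams.n_layer; ++il) {
            if (hparams.recurrent_layer_arr[il]) {
                // build_ssm_layer(il);  // hypothetical: recurrent Mamba/SSM block
            } else {
                // build_attn_layer(il); // hypothetical: standard attention block
            }
        }
    }

A fixed-size std::array keyed by LLAMA_MAX_LAYERS keeps the per-layer flags inline in the hparams struct, consistent with how llama.cpp stores other per-layer hyperparameter arrays.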