Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2025-11-03 09:22:01 +00:00)

	model : add Jamba to Mamba-specific hparams printing
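As the diff below shows, the SSM hyperparameter logging in llama_model::print_info() was previously gated on LLM_ARCH_MAMBA and LLM_ARCH_MAMBA2 only, and on this branch it had ended up nested inside the section that also prints the classifier labels. The commit moves the block out so it runs after the vocab-only section and adds LLM_ARCH_JAMBA to the condition, so Jamba models now report ssm_d_conv, ssm_d_inner, ssm_d_state, ssm_dt_rank, ssm_n_group and ssm_dt_b_c_rms at load time as well.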
src/llama-model.cpp

@@ -4842,16 +4842,6 @@ void llama_model::print_info() const {
         LLAMA_LOG_INFO("%s: freq_scale_train = %g\n",     __func__, hparams.rope_freq_scale_train);
         LLAMA_LOG_INFO("%s: n_ctx_orig_yarn  = %u\n",     __func__, hparams.n_ctx_orig_yarn);
         LLAMA_LOG_INFO("%s: rope_finetuned   = %s\n",     __func__, hparams.rope_finetuned ? "yes" : "unknown");
-    }
-
-    if (arch == LLM_ARCH_MAMBA || arch == LLM_ARCH_MAMBA2) {
-        LLAMA_LOG_INFO("%s: ssm_d_conv       = %u\n",     __func__, hparams.ssm_d_conv);
-        LLAMA_LOG_INFO("%s: ssm_d_inner      = %u\n",     __func__, hparams.ssm_d_inner);
-        LLAMA_LOG_INFO("%s: ssm_d_state      = %u\n",     __func__, hparams.ssm_d_state);
-        LLAMA_LOG_INFO("%s: ssm_dt_rank      = %u\n",     __func__, hparams.ssm_dt_rank);
-        LLAMA_LOG_INFO("%s: ssm_n_group      = %u\n",     __func__, hparams.ssm_n_group);
-        LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms   = %d\n",     __func__, hparams.ssm_dt_b_c_rms);
-
         if (!classifier_labels.empty()) {
             LLAMA_LOG_INFO("%s: n_cls_out        = %u\n", __func__, hparams.n_cls_out);
 
@@ -4862,6 +4852,15 @@ void llama_model::print_info() const {
         }
     }
 
+    if (arch == LLM_ARCH_MAMBA || arch == LLM_ARCH_MAMBA2 || arch == LLM_ARCH_JAMBA) {
+        LLAMA_LOG_INFO("%s: ssm_d_conv       = %u\n",     __func__, hparams.ssm_d_conv);
+        LLAMA_LOG_INFO("%s: ssm_d_inner      = %u\n",     __func__, hparams.ssm_d_inner);
+        LLAMA_LOG_INFO("%s: ssm_d_state      = %u\n",     __func__, hparams.ssm_d_state);
+        LLAMA_LOG_INFO("%s: ssm_dt_rank      = %u\n",     __func__, hparams.ssm_dt_rank);
+        LLAMA_LOG_INFO("%s: ssm_n_group      = %u\n",     __func__, hparams.ssm_n_group);
+        LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms   = %d\n",     __func__, hparams.ssm_dt_b_c_rms);
+    }
+
     LLAMA_LOG_INFO("%s: model type       = %s\n",     __func__, type_name().c_str());
     if (pimpl->n_elements >= 1e12) {
         LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, pimpl->n_elements*1e-12);
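For reference, the format strings above pass __func__ as the first argument, so these lines surface in the model load log under the print_info prefix. A rough sketch of the resulting fragment, with made-up values rather than output captured from a real Jamba GGUF:

    print_info: ssm_d_conv       = 4
    print_info: ssm_d_inner      = 8192
    print_info: ssm_d_state      = 16
    print_info: ssm_dt_rank      = 256
    print_info: ssm_n_group      = 1
    print_info: ssm_dt_b_c_rms   = 0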