mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	Don't crash on ftype (formerly f16) == 4 (#917)
This commit is contained in:
		| @@ -827,7 +827,9 @@ static const char *llama_ftype_name(enum llama_ftype ftype) { | ||||
|         case LLAMA_FTYPE_MOSTLY_F16:  return "mostly F16"; | ||||
|         case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0"; | ||||
|         case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1"; | ||||
| -        default: LLAMA_ASSERT(false); | ||||
| +        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16: | ||||
| +                                      return "mostly Q4_1, some F16"; | ||||
| +        default:                      return "unknown, may not work"; | ||||
|     } | ||||
| } | ||||
|  | ||||
|   | ||||
							
								
								
									
										1
									
								
								llama.h
									
									
									
									
									
								
							
							
						
						
									
										1
									
								
								llama.h
									
									
									
									
									
								
							| @@ -71,6 +71,7 @@ extern "C" { | ||||
|         LLAMA_FTYPE_MOSTLY_F16  = 1,  // except 1d tensors | ||||
|         LLAMA_FTYPE_MOSTLY_Q4_0 = 2,  // except 1d tensors | ||||
|         LLAMA_FTYPE_MOSTLY_Q4_1 = 3,  // except 1d tensors | ||||
| +        LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 | ||||
|     }; | ||||
|  | ||||
|     LLAMA_API struct llama_context_params llama_context_default_params(); | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Stephan Walter
					Stephan Walter