mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-31 08:51:55 +00:00 
			
		
		
		
	 e937066420
			
		
	
	e937066420
	
	
	
		
			
			* gguf-py : export chat templates
			* llama.cpp : escape new lines in gguf kv info prints
			* gguf-py : bump version
			* gguf-py : check chat_template type
			* gguf-py : initialize chat_template
		
			
				
	
	
		
			36 lines
		
	
	
		
			921 B
		
	
	
	
		
			TOML
		
	
	
	
	
	
			
		
		
	
	
			36 lines
		
	
	
		
			921 B
		
	
	
	
		
			TOML
		
	
	
	
	
	
# Poetry packaging metadata for the gguf Python package
# (readers/writers for the GGUF model file format used by GGML/llama.cpp).
[tool.poetry]
name = "gguf"
version = "0.6.0"
description = "Read and write ML models in GGUF for GGML"
authors = ["GGML <ggml@ggml.ai>"]
packages = [
    { include = "gguf" },
    # PEP 561 marker file: ships so downstream type checkers use the
    # package's inline annotations.
    { include = "gguf/py.typed" },
    { include = "scripts" },
]
readme = "README.md"
homepage = "https://ggml.ai"
repository = "https://github.com/ggerganov/llama.cpp"
keywords = ["ggml", "gguf", "llama.cpp"]
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]

[tool.poetry.dependencies]
python = ">=3.8"
numpy = ">=1.17"

# NOTE(review): [tool.poetry.dev-dependencies] is deprecated since Poetry 1.2
# in favor of [tool.poetry.group.dev.dependencies]; kept as-is so the file
# remains installable with pre-1.2 Poetry — confirm before migrating.
[tool.poetry.dev-dependencies]
pytest = "^5.2"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

# Console entry points installed alongside the package; each maps a
# command name to a callable in the bundled `scripts` package.
[tool.poetry.scripts]
gguf-convert-endian = "scripts:gguf_convert_endian_entrypoint"
gguf-dump = "scripts:gguf_dump_entrypoint"
gguf-set-metadata = "scripts:gguf_set_metadata_entrypoint"