mirror of https://github.com/ggml-org/llama.cpp.git
commit 9c1ba55733
* style: format with nixfmt/rfc101-style
* build(nix): Package gguf-py
* build(nix): Refactor to new scope for gguf-py
* build(nix): Exclude gguf-py from devShells
* build(nix): Refactor gguf-py derivation to take in exact deps
* build(nix): Enable pytestCheckHook and pythonImportsCheck for gguf-py
* build(python): Package python scripts with pyproject.toml
* chore: Cleanup
* dev(nix): Break up python/C devShells
* build(python): Relax pytorch version constraint (Nix has an older version)
* chore: Move cmake to nativeBuildInputs for devShell
* fmt: Reconcile formatting with rebase
* style: nix fmt
* cleanup: Remove unnecessary __init__.py
* chore: Suggestions from review
  - Filter out non-source files from llama-scripts flake derivation
  - Clean up unused closure
  - Remove scripts devShell
* revert: Bad changes
* dev: Simplify devShells, restore the -extra devShell
* build(nix): Add pyyaml for gguf-py
* chore: Remove some unused bindings
* dev: Add tiktoken to -extra devShells
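Several of the bullets above (packaging gguf-py, taking exact deps, enabling pytestCheckHook and pythonImportsCheck, adding pyyaml) refer to a separate gguf-py derivation that is not shown on this page. As a rough sketch only, such a derivation could look like the following; the version, source path, and exact dependency list are assumptions for illustration, not the committed file.

{
  lib,
  buildPythonPackage,
  poetry-core,
  pytestCheckHook,
  numpy,
  pyyaml,
  tqdm,
}:

buildPythonPackage {
  pname = "gguf";
  version = "0.0.0"; # placeholder; the real version lives in gguf-py/pyproject.toml
  pyproject = true;

  src = lib.cleanSource ../../gguf-py; # assumed location of the gguf-py tree

  nativeBuildInputs = [ poetry-core ];

  # "take in exact deps": each dependency is an explicit function argument
  # above, rather than being pulled ad hoc out of python3Packages.
  dependencies = [
    numpy
    pyyaml # per "build(nix): Add pyyaml for gguf-py"
    tqdm
  ];

  # per "build(nix): Enable pytestCheckHook and pythonImportsCheck for gguf-py"
  nativeCheckInputs = [ pytestCheckHook ];
  pythonImportsCheck = [ "gguf" ];
}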
67 lines · 1.3 KiB · Nix
{
  lib,
  stdenv,
  buildPythonPackage,
  poetry-core,
  mkShell,
  python3Packages,
  gguf-py,
}@inputs:

let
  # Runtime dependencies shared by the repository's Python scripts.
  llama-python-deps = with python3Packages; [
    numpy
    sentencepiece
    transformers
    protobuf
    torchWithoutCuda
    gguf-py
    tqdm

    # for scripts/compare-llama-bench.py
    gitpython
    tabulate

    # for examples/pydantic-models-to-grammar-examples.py
    docstring-parser
    pydantic
  ];

  llama-python-test-deps = with python3Packages; [
    # Server bench
    matplotlib

    # server tests
    openai
    behave
    prometheus-client
  ];
in

buildPythonPackage ({
  pname = "llama-scripts";
  version = "0.0.0";
  pyproject = true;

  # NOTE: The files filtered out here are not visible in the build sandbox, nor
  # do they affect the output hash. They can be modified without triggering a rebuild.
  src = lib.cleanSourceWith {
    filter =
      name: type:
      let
        any = builtins.any (x: x);
        baseName = builtins.baseNameOf name;
      in
      # Keep only Python sources plus the packaging metadata.
      any [
        (lib.hasSuffix ".py" name)
        (baseName == "README.md")
        (baseName == "pyproject.toml")
      ];
    src = lib.cleanSource ../../.;
  };
  nativeBuildInputs = [ poetry-core ];
  nativeCheckInputs = llama-python-test-deps;
  dependencies = llama-python-deps;
})
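For context, a consumer would typically instantiate this file through the Python package set's callPackage, supplying the gguf-py derivation the commit introduces. The snippet below is a hypothetical wiring sketch, assuming file names like ./python-scripts.nix and ./gguf-py.nix; the actual flake scope layout in the repository may differ.

# Hypothetical wiring, for illustration only; file names are assumed.
{ pkgs }:
let
  inherit (pkgs) python3Packages;

  # The gguf-py derivation referenced above.
  gguf-py = python3Packages.callPackage ./gguf-py.nix { };

  # This file: python3Packages.callPackage resolves buildPythonPackage and
  # poetry-core from the Python scope, and lib/stdenv/mkShell from pkgs.
  llama-scripts = python3Packages.callPackage ./python-scripts.nix {
    inherit gguf-py;
  };
in
llama-scripts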