name: Check Pre-Tokenizer Hashes
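
# Re-run the hash check whenever either conversion script changes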
on:
  push:
    paths:
      - 'convert_hf_to_gguf.py'
      - 'convert_hf_to_gguf_update.py'
  pull_request:
    paths:
      - 'convert_hf_to_gguf.py'
      - 'convert_hf_to_gguf_update.py'

jobs:
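  # Regenerates the pre-tokenizer hashes and fails if the committed ones are stale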
  pre-tokenizer-hashes:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
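
      # torch is not required for convert_hf_to_gguf_update.py, so only the update script's requirements are installed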
      - name: Install Python dependencies
        run: |
          python3 -m venv .venv
          .venv/bin/pip install -r requirements/requirements-convert_hf_to_gguf_update.txt
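
      # Keep a copy of the committed script, then let the update script regenerate the hashes in place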
      - name: Update pre-tokenizer hashes
        run: |
          cp convert_hf_to_gguf.py /tmp
          .venv/bin/python convert_hf_to_gguf_update.py --check-missing
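
      # Any difference against the copy in /tmp means the committed hashes are out of date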
      - name: Check if committed pre-tokenizer hashes match the generated version
        run: |
          if ! diff -q convert_hf_to_gguf.py /tmp/convert_hf_to_gguf.py; then
            echo "Model pre-tokenizer hashes (in convert_hf_to_gguf.py) do not match generated hashes (from convert_hf_to_gguf_update.py)."
            echo "To fix: run ./convert_hf_to_gguf_update.py and commit the updated convert_hf_to_gguf.py along with your changes"
            echo "Differences found:"
            diff convert_hf_to_gguf.py /tmp/convert_hf_to_gguf.py || true
            exit 1
          fi
          echo "Model pre-tokenizer hashes are up to date."