mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-10-30 08:42:00 +00:00 
			
		
		
		
	json-schema-to-grammar improvements (+ added to server) (#5978)
* json: fix arrays (disallow `[,1]`)
* json: support tuple types (`[number, string]`)
* json: support additionalProperties (`{[k: string]: [string,number][]}`)
* json: support required / optional properties
* json: add support for pattern
* json: resolve $ref (and support https schema urls)
* json: fix $ref resolution
* json: support union types (mostly for nullable types I think)
* json: support allOf + nested anyOf
* json: support any (`{}` or `{type: object}`)
* json: fix merge
* json: temp fix for escapes
* json: spaces in output and unrestricted output spaces
* json: add typings
* json: fix typo
* Create ts-type-to-grammar.sh
* json: fix _format_literal (json.dumps already escapes quotes)
* json: merge lit sequences and handle negatives
{"type": "string", "pattern": "^({\"question\": \"[^\"]+\", \"response\": \"[^\"]+\"}\\n)+$"}
* json: handle pattern repetitions
* Update json-schema-to-grammar.mjs
* Create regex-to-grammar.py
* json: extract repeated regexp patterns to subrule
* Update json-schema-to-grammar.py
* Update json-schema-to-grammar.py
* Update json-schema-to-grammar.py
* json: handle schema from pydantic Optional fields
* Update json-schema-to-grammar.py
* Update json-schema-to-grammar.py
* Update ts-type-to-grammar.sh
* Update ts-type-to-grammar.sh
* json: simplify nullable fields handling
* json: accept duplicate identical rules
* json: revert space to 1 at most
* json: reuse regexp pattern subrules
* json: handle uuid string format
* json: fix literal escapes
* json: add --allow-fetch
* json: simplify range escapes
* json: support negative ranges in patterns
* Delete commit.txt
* json: custom regex parser, adds dot support & JS-portable
* json: rm trailing spaces
* Update json-schema-to-grammar.mjs
* json: updated server & chat `( cd examples/server && ./deps.sh )`
* json: port fixes from mjs to python
* Update ts-type-to-grammar.sh
* json: support prefixItems alongside array items
* json: add date format + fix uuid
* json: add date, time, date-time formats
* json: preserve order of props from TS defs
* json: port schema converter to C++, wire in ./server
* json: nits
* Update json-schema-to-grammar.cpp
* Update json-schema-to-grammar.cpp
* Update json-schema-to-grammar.cpp
* json: fix mjs implementation + align outputs
* Update json-schema-to-grammar.mjs.hpp
* json: test C++, JS & Python versions
* json: nits + regen deps
* json: cleanup test
* json: revert from c++17 to 11
* json: nit fixes
* json: dirty include for test
* json: fix zig build
* json: pass static command to std::system in tests (fixed temp files)
* json: fix top-level $refs
* json: don't use c++20 designated initializers
* nit
* json: basic support for reserved names `{number:{number:{root:number}}}`
* Revamp test cmake to allow args (WORKING_DIRECTORY needed for JSON test)
* json: re-ran server deps.sh
* json: simplify test
* json: support mix of additional props & required/optional
* json: add tests for some expected failures
* json: fix type=const in c++, add failure expectations for non-str const&enum
* json: test (& simplify output of) empty schema
* json: check parsing in test + fix value & string refs
* json: add server tests for OAI JSON response_format
* json: test/fix top-level anyOf
* json: improve grammar parsing failures
* json: test/fix additional props corner cases
* json: fix string patterns (was missing quotes)
* json: ws nit
* json: fix json handling in server when there's no response_format
* json: catch schema conversion errors in server
* json: don't complain about unknown format type in server if unset
* json: cleaner build of test
* json: create examples/json-schema-pydantic-example.py
* json: fix date pattern
* json: move json.hpp & json-schema-to-grammar.{cpp,h} to common
* json: indent 4 spaces
* json: fix naming of top-level c++ function (+ drop unused one)
* json: avoid using namespace std
* json: fix zig build
* Update server.feature
* json: iostream -> fprintf
* json: space before & refs for consistency
* json: nits
			
			
This commit is contained in:
		| @@ -59,6 +59,7 @@ def step_server_config(context, server_fqdn, server_port): | ||||
|     context.seed = None | ||||
|     context.server_seed = None | ||||
|     context.user_api_key = None | ||||
|     context.response_format = None | ||||
|  | ||||
|     context.tasks_result = [] | ||||
|     context.concurrent_tasks = [] | ||||
| @@ -269,6 +270,11 @@ def step_max_tokens(context, max_tokens): | ||||
|     context.n_predict = max_tokens | ||||
|  | ||||
|  | ||||
| @step('a response format {response_format}') | ||||
| def step_response_format(context, response_format): | ||||
|     context.response_format = json.loads(response_format) | ||||
|  | ||||
|  | ||||
| @step('streaming is {enable_streaming}') | ||||
| def step_streaming(context, enable_streaming): | ||||
|     context.enable_streaming = enable_streaming == 'enabled' | ||||
| @@ -384,6 +390,9 @@ async def step_oai_chat_completions(context, api_error): | ||||
|                                             enable_streaming=context.enable_streaming | ||||
|                                             if hasattr(context, 'enable_streaming') else None, | ||||
|  | ||||
|                                             response_format=context.response_format | ||||
|                                             if hasattr(context, 'response_format') else None, | ||||
|  | ||||
|                                             seed=await completions_seed(context), | ||||
|  | ||||
|                                             user_api_key=context.user_api_key | ||||
| @@ -443,6 +452,8 @@ async def step_oai_chat_completions(context): | ||||
|                               if hasattr(context, 'n_predict') else None, | ||||
|                               enable_streaming=context.enable_streaming | ||||
|                               if hasattr(context, 'enable_streaming') else None, | ||||
|                               response_format=context.response_format | ||||
|                               if hasattr(context, 'response_format') else None, | ||||
|                               seed=await completions_seed(context), | ||||
|                               user_api_key=context.user_api_key | ||||
|                               if hasattr(context, 'user_api_key') else None) | ||||
| @@ -463,6 +474,8 @@ async def step_oai_chat_completions(context): | ||||
|                               if hasattr(context, 'n_predict') else None, | ||||
|                               enable_streaming=context.enable_streaming | ||||
|                               if hasattr(context, 'enable_streaming') else None, | ||||
|                               response_format=context.response_format | ||||
|                               if hasattr(context, 'response_format') else None, | ||||
|                               seed=context.seed | ||||
|                               if hasattr(context, 'seed') else | ||||
|                               context.server_seed | ||||
| @@ -745,6 +758,7 @@ async def oai_chat_completions(user_prompt, | ||||
|                                model=None, | ||||
|                                n_predict=None, | ||||
|                                enable_streaming=None, | ||||
|                                response_format=None, | ||||
|                                seed=None, | ||||
|                                user_api_key=None, | ||||
|                                expect_api_error=None): | ||||
| @@ -770,6 +784,8 @@ async def oai_chat_completions(user_prompt, | ||||
|         "stream": enable_streaming, | ||||
|         "seed": seed | ||||
|     } | ||||
|     if response_format is not None: | ||||
|         payload['response_format'] = response_format | ||||
|     completion_response = { | ||||
|         'content': '', | ||||
|         'timings': { | ||||
| @@ -830,6 +846,7 @@ async def oai_chat_completions(user_prompt, | ||||
|                 model=model, | ||||
|                 max_tokens=n_predict, | ||||
|                 stream=enable_streaming, | ||||
|                 response_format=payload.get('response_format'), | ||||
|                 seed=seed | ||||
|             ) | ||||
|         except openai.error.AuthenticationError as e: | ||||
|   | ||||
		Reference in New Issue
	
	Block a user
	 Olivier Chafik
					Olivier Chafik