* json: fix arrays (disallow `[,1]`)
* json: support tuple types (`[number, string]`)
* json: support additionalProperties (`{[k: string]: [string,number][]}`)
* json: support required / optional properties
* json: add support for pattern
* json: resolve $ref (and support https schema urls)
* json: fix $ref resolution
* json: support union types (mostly for nullable types I think)
* json: support allOf + nested anyOf
* json: support any (`{}` or `{type: object}`)
* json: fix merge
* json: temp fix for escapes
* json: spaces in output and unrestricted output spaces
* json: add typings
* json: fix typo
* Create ts-type-to-grammar.sh
* json: fix _format_literal (json.dumps already escapes quotes)
* json: merge lit sequences and handle negatives
{"type": "string", "pattern": "^({\"question\": \"[^\"]+\", \"response\": \"[^\"]+\"}\\n)+$"}
* json: handle pattern repetitions
* Update json-schema-to-grammar.mjs
* Create regex-to-grammar.py
* json: extract repeated regexp patterns to subrule
* Update json-schema-to-grammar.py
* Update json-schema-to-grammar.py
* Update json-schema-to-grammar.py
* json: handle schema from pydantic Optional fields
* Update json-schema-to-grammar.py
* Update json-schema-to-grammar.py
* Update ts-type-to-grammar.sh
* Update ts-type-to-grammar.sh
* json: simplify nullable fields handling
* json: accept duplicate identical rules
* json: revert space to 1 at most
* json: reuse regexp pattern subrules
* json: handle uuid string format
* json: fix literal escapes
* json: add --allow-fetch
* json: simplify range escapes
* json: support negative ranges in patterns
* Delete commit.txt
* json: custom regex parser, adds dot support & JS-portable
* json: rm trailing spaces
* Update json-schema-to-grammar.mjs
* json: updated server & chat `( cd examples/server && ./deps.sh )`
* json: port fixes from mjs to python
* Update ts-type-to-grammar.sh
* json: support prefixItems alongside array items
* json: add date format + fix uuid
* json: add date, time, date-time formats
* json: preserve order of props from TS defs
* json: port schema converter to C++, wire in ./server
* json: nits
* Update json-schema-to-grammar.cpp
* Update json-schema-to-grammar.cpp
* Update json-schema-to-grammar.cpp
* json: fix mjs implementation + align outputs
* Update json-schema-to-grammar.mjs.hpp
* json: test C++, JS & Python versions
* json: nits + regen deps
* json: cleanup test
* json: revert from c++17 to 11
* json: nit fixes
* json: dirty include for test
* json: fix zig build
* json: pass static command to std::system in tests (fixed temp files)
* json: fix top-level $refs
* json: don't use c++20 designated initializers
* nit
* json: basic support for reserved names `{number:{number:{root:number}}}`
* Revamp test cmake to allow args (WORKING_DIRECTORY needed for JSON test)
* json: re-ran server deps.sh
* json: simplify test
* json: support mix of additional props & required/optional
* json: add tests for some expected failures
* json: fix type=const in c++, add failure expectations for non-str const&enum
* json: test (& simplify output of) empty schema
* json: check parsing in test + fix value & string refs
* json: add server tests for OAI JSON response_format
* json: test/fix top-level anyOf
* json: improve grammar parsing failures
* json: test/fix additional props corner cases
* json: fix string patterns (was missing quotes)
* json: ws nit
* json: fix json handling in server when there's no response_format
* json: catch schema conversion errors in server
* json: don't complain about unknown format type in server if unset
* json: cleaner build of test
* json: create examples/json-schema-pydantic-example.py
* json: fix date pattern
* json: move json.hpp & json-schema-to-grammar.{cpp,h} to common
* json: indent 4 spaces
* json: fix naming of top-level c++ function (+ drop unused one)
* json: avoid using namespace std
* json: fix zig build
* Update server.feature
* json: iostream -> fprintf
* json: space before & refs for consistency
* json: nits
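
A minimal sketch of how the JavaScript converter is driven, mirroring the calls made by the chat client further below (only the schema.json filename and the empty prop_order value are placeholders):

// Sketch (not part of the change list above): turn a JSON schema into a GBNF
// grammar with the JS converter, the same way the chat client below does.
import { readFileSync } from 'node:fs'
import { SchemaConverter } from './public/json-schema-to-grammar.mjs'

const file = 'schema.json'  // hypothetical input file
let schema = JSON.parse(readFileSync(file, 'utf-8'))

const converter = new SchemaConverter({ prop_order: {}, allow_fetch: true })
schema = await converter.resolveRefs(schema, file)  // resolve $ref, optionally fetching https schema URLs
converter.visit(schema, '')                         // build grammar rules starting from the root schema
console.log(converter.formatGrammar())              // print the resulting GBNF grammar

The chat client below performs this same resolveRefs → visit → formatGrammar sequence when started with --grammar-json-schema; its full source follows.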
// Simple streaming chat client for the llama.cpp server (/tokenize and /completion).
// Generation can be constrained by a GBNF grammar loaded from a file (--grammar)
// or generated on the fly from a JSON schema (--grammar-json-schema).
import * as readline from 'node:readline'
import { stdin, stdout } from 'node:process'
import { readFileSync } from 'node:fs'
import { SchemaConverter } from './public/json-schema-to-grammar.mjs'

const args = process.argv.slice(2);
const grammarJsonSchemaFile = args.find(
    (_, index) => args[index - 1] === "--grammar-json-schema"
);

const no_cached_prompt = args.find(
    (_, index) => args[index - 1] === "--no-cache-prompt"
) ?? "false";

const grammarFile = args.find((_, index) => args[index - 1] === "--grammar");

// Example usage: function,arguments
const grammarJsonSchemaPropOrder = args.find(
    (_, index) => args[index - 1] === "--grammar-json-schema-prop-order"
);
const propOrder = grammarJsonSchemaPropOrder
    ? grammarJsonSchemaPropOrder
          .split(",")
          .reduce((acc, cur, index) => ({ ...acc, [cur]: index }), {})
    : {};

let grammar = null
if (grammarJsonSchemaFile) {
    let schema = JSON.parse(readFileSync(grammarJsonSchemaFile, 'utf-8'))
    const converter = new SchemaConverter({prop_order: propOrder, allow_fetch: true})
    schema = await converter.resolveRefs(schema, grammarJsonSchemaFile)
    converter.visit(schema, '')
    grammar = converter.formatGrammar()
}
if (grammarFile) {
    grammar = readFileSync(grammarFile, 'utf-8')
}

// for cached prompt
let slot_id = -1;

const API_URL = 'http://127.0.0.1:8080'

const chat = [
    {
        human: "Hello, Assistant.",
        assistant: "Hello. How may I help you today?"
    },
    {
        human: "Please tell me the largest city in Europe.",
        assistant: "Sure. The largest city in Europe is Moscow, the capital of Russia."
    },
]

const instruction = `A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.`

function format_prompt(question) {
    return `${instruction}\n${
        chat.map(m =>`### Human: ${m.human}\n### Assistant: ${m.assistant}`).join("\n")
    }\n### Human: ${question}\n### Assistant:`
}

// Tokenize `content` via the server; returns the token array, or [] if the request fails.
async function tokenize(content) {
    const result = await fetch(`${API_URL}/tokenize`, {
        method: 'POST',
        body: JSON.stringify({ content })
    })

    if (!result.ok) {
        return []
    }

    return (await result.json()).tokens
}

// Number of prompt tokens to keep when the server shifts the context.
const n_keep = (await tokenize(instruction)).length

// Stream a completion for `question`, echoing tokens to stdout and recording
// the exchange in `chat`.
async function chat_completion(question) {
    const result = await fetch(`${API_URL}/completion`, {
        method: 'POST',
        body: JSON.stringify({
            prompt: format_prompt(question),
            temperature: 0.2,
            top_k: 40,
            top_p: 0.9,
            n_keep: n_keep,
            n_predict: 256,
            cache_prompt: no_cached_prompt === "false",
            slot_id: slot_id,
            stop: ["\n### Human:"], // stop completion after generating this
            grammar,
            stream: true,
        })
    })

    if (!result.ok) {
        return
    }

    let answer = ''

    // Naive SSE handling: assumes each chunk contains one complete `data: ...` event.
    for await (const chunk of result.body) {
        const t = Buffer.from(chunk).toString('utf8')
        if (t.startsWith('data: ')) {
            const message = JSON.parse(t.substring(6))
            slot_id = message.slot_id
            answer += message.content
            process.stdout.write(message.content)
            if (message.stop) {
                if (message.truncated) {
                    chat.shift()
                }
                break
            }
        }
    }

    process.stdout.write('\n')
    chat.push({ human: question, assistant: answer.trimStart() })
}

const rl = readline.createInterface({ input: stdin, output: stdout });

const readlineQuestion = (rl, query, options) => new Promise((resolve, reject) => {
    rl.question(query, options, resolve)
});

// Simple REPL: read a question, stream the answer.
while(true) {
    const question = await readlineQuestion(rl, '> ')
    await chat_completion(question)
}