	Merge branch 'master' into gg/llama-kv-cache
ggml-ci

@@ -274,7 +274,7 @@ struct server_task {
         params.speculative.p_min = json_value(data, "speculative.p_min", defaults.speculative.p_min);
 
         params.speculative.n_min = std::min(params.speculative.n_max, params.speculative.n_min);
-        params.speculative.n_min = std::max(params.speculative.n_min, 2);
+        params.speculative.n_min = std::max(params.speculative.n_min, 0);
         params.speculative.n_max = std::max(params.speculative.n_max, 0);
 
         // Use OpenAI API logprobs only if n_probs wasn't provided
@@ -329,9 +329,6 @@ struct server_task {
         }
 
         // process "json_schema" and "grammar"
-        if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
-            throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
-        }
         if (data.contains("json_schema") && !data.contains("grammar")) {
             try {
                 auto schema                  = json_value(data, "json_schema", json::object());
@@ -1807,7 +1804,7 @@ struct server_context {
     // Necessary similarity of prompt for slot selection
     float slot_prompt_similarity = 0.0f;
 
-    common_chat_templates chat_templates;
+    common_chat_templates_ptr chat_templates;
 
     ~server_context() {
         // Clear any sampling context
@@ -1891,45 +1888,17 @@ struct server_context {
             llama_init_dft.context.reset();
         }
 
-        if (params_base.chat_template.empty() && !validate_builtin_chat_template(params.use_jinja)) {
+        chat_templates = common_chat_templates_init(model, params_base.chat_template);
+        try {
+            common_chat_format_example(chat_templates.get(), params.use_jinja);
+        } catch (const std::exception & e) {
             SRV_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__);
-            chat_templates = common_chat_templates_from_model(model, "chatml");
-        } else {
-            chat_templates = common_chat_templates_from_model(model, params_base.chat_template);
+            chat_templates = common_chat_templates_init(model, "chatml");
         }
-        GGML_ASSERT(chat_templates.template_default.get() != nullptr);
 
         return true;
     }
 
-    bool validate_builtin_chat_template(bool use_jinja) const {
-        llama_chat_message chat[] = {{"user", "test"}};
-
-        if (use_jinja) {
-            auto templates = common_chat_templates_from_model(model, "");
-            common_chat_inputs inputs;
-            inputs.messages = json::array({{
-                {"role", "user"},
-                {"content", "test"},
-            }});
-            GGML_ASSERT(templates.template_default);
-            try {
-                common_chat_params_init(*templates.template_default, inputs);
-                if (templates.template_tool_use) {
-                    common_chat_params_init(*templates.template_tool_use, inputs);
-                }
-                return true;
-            } catch (const std::exception & e) {
-                SRV_ERR("failed to apply template: %s\n", e.what());
-                return false;
-            }
-        } else {
-            const char * tmpl = llama_model_chat_template(model, /* name */ nullptr);
-            const int32_t chat_res = llama_chat_apply_template(tmpl, chat, 1, true, nullptr, 0);
-            return chat_res > 0;
-        }
-    }
-
     void init() {
         const int32_t n_ctx_slot = n_ctx / params_base.n_parallel;
 
@@ -3822,13 +3791,15 @@ int main(int argc, char ** argv) {
             { "default_generation_settings", ctx_server.default_generation_settings_for_props },
             { "total_slots",                 ctx_server.params_base.n_parallel },
             { "model_path",                  ctx_server.params_base.model },
-            { "chat_template",               ctx_server.chat_templates.template_default->source() },
-            { "bos_token",                   ctx_server.chat_templates.template_default->bos_token() },
-            { "eos_token",                   ctx_server.chat_templates.template_default->eos_token() },
+            { "chat_template",               common_chat_templates_source(ctx_server.chat_templates.get()) },
+            { "bos_token",                   common_token_to_piece(ctx_server.ctx, llama_vocab_bos(ctx_server.vocab), /* special= */ true)},
+            { "eos_token",                   common_token_to_piece(ctx_server.ctx, llama_vocab_eos(ctx_server.vocab), /* special= */ true)},
             { "build_info",                  build_info },
         };
-        if (ctx_server.params_base.use_jinja && ctx_server.chat_templates.template_tool_use) {
-            data["chat_template_tool_use"] = ctx_server.chat_templates.template_tool_use->source();
+        if (ctx_server.params_base.use_jinja) {
+            if (auto tool_use_src = common_chat_templates_source(ctx_server.chat_templates.get(), "tool_use")) {
+                data["chat_template_tool_use"] = tool_use_src;
+            }
         }
 
         res_ok(res, data);
@@ -4063,7 +4034,7 @@ int main(int argc, char ** argv) {
        }
 
        auto body = json::parse(req.body);
-        json data = oaicompat_completion_params_parse(body, params.use_jinja, params.reasoning_format, ctx_server.chat_templates);
+        json data = oaicompat_completion_params_parse(body, params.use_jinja, params.reasoning_format, ctx_server.chat_templates.get());
 
        return handle_completions_impl(
            SERVER_TASK_TYPE_COMPLETION,
@@ -4076,7 +4047,7 @@ int main(int argc, char ** argv) {
    // same with handle_chat_completions, but without inference part
    const auto handle_apply_template = [&ctx_server, &params, &res_ok](const httplib::Request & req, httplib::Response & res) {
        auto body = json::parse(req.body);
-        json data = oaicompat_completion_params_parse(body, params.use_jinja, params.reasoning_format, ctx_server.chat_templates);
+        json data = oaicompat_completion_params_parse(body, params.use_jinja, params.reasoning_format, ctx_server.chat_templates.get());
        res_ok(res, {{ "prompt", std::move(data.at("prompt")) }});
    };
 
@@ -4263,6 +4234,11 @@ int main(int argc, char ** argv) {
        //    return;
        //}
 
+        // if true, use TEI API format, otherwise use Jina API format
+        // Jina: https://jina.ai/reranker/
+        // TEI: https://huggingface.github.io/text-embeddings-inference/#/Text%20Embeddings%20Inference/rerank
+        bool is_tei_format = body.contains("texts");
+
        json query;
        if (body.count("query") == 1) {
            query = body.at("query");
@@ -4275,7 +4251,8 @@ int main(int argc, char ** argv) {
            return;
        }
 
-        std::vector<std::string> documents = json_value(body, "documents", std::vector<std::string>());
+        std::vector<std::string> documents = json_value(body, "documents",
+                                             json_value(body, "texts", std::vector<std::string>()));
        if (documents.empty()) {
            res_error(res, format_error_response("\"documents\" must be a non-empty string array", ERROR_TYPE_INVALID_REQUEST));
            return;
@@ -4320,7 +4297,12 @@ int main(int argc, char ** argv) {
        }
 
        // write JSON response
-        json root = format_response_rerank(body, responses);
+        json root = format_response_rerank(
+            body,
+            responses,
+            is_tei_format,
+            documents);
 
        res_ok(res, root);
    };
@@ -4482,8 +4464,8 @@ int main(int argc, char ** argv) {
 
    // print sample chat example to make it clear which template is used
    LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
-        ctx_server.chat_templates.template_default->source().c_str(),
-        common_chat_format_example(*ctx_server.chat_templates.template_default, ctx_server.params_base.use_jinja).c_str());
+        common_chat_templates_source(ctx_server.chat_templates.get()),
+        common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str());
 
    ctx_server.queue_tasks.on_new_task([&ctx_server](const server_task & task) {
        ctx_server.process_single_task(task);

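The rerank endpoint above now accepts either the Jina-style "documents" field or the TEI-style "texts" field, selected by which key is present in the request body. Below is a minimal sketch of the two request styles; the host/port and the small HTTP helper are illustrative assumptions, not part of the diff, and a llama-server instance with a reranking model loaded is assumed to be running.

```python
# Sketch of the two request styles the updated /rerank endpoint accepts.
# Assumes a llama-server with a reranking model is listening on
# http://localhost:8080 (host/port are illustrative).
import json
import urllib.request

def post(path: str, payload: dict) -> dict:
    # Tiny helper: POST a JSON body and parse the JSON response.
    req = urllib.request.Request(
        f"http://localhost:8080{path}",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())

docs = ["A machine is a physical system ...", "Machine learning is a field of study ..."]

# Jina-style request: "documents" -> response is an object with a "results" array
jina = post("/rerank", {"query": "Machine learning is", "documents": docs})

# TEI-style request: "texts" -> response is a bare array of {index, score};
# "return_text": True additionally echoes each document back in the response
tei = post("/rerank", {"query": "Machine learning is", "texts": docs, "return_text": True})
```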
@@ -48,7 +48,7 @@ DEBUG=1 ./tests.sh -s -v -x
 To run all the tests in a file:
 
 ```shell
-./tests.sh unit/test_chat_completion.py.py -v -x
+./tests.sh unit/test_chat_completion.py -v -x
 ```
 
 To run a single test:

@@ -21,6 +21,8 @@ def create_server():
         (None, "Book", "What is the best book", 8, "^ blue",                    23, 8, "length", True, "This is not a chat template, it is"),
         ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", False, None),
         ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", True, None),
+        (None, "Book", [{"type": "text", "text": "What is"}, {"type": "text", "text": "the best book"}], 8, "Whillicter", 79, 8, "length", False, None),
+        (None, "Book", [{"type": "text", "text": "What is"}, {"type": "text", "text": "the best book"}], 8, "Whillicter", 79, 8, "length", True, None),
     ]
 )
 def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason, jinja, chat_template):
@@ -44,7 +46,7 @@ def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_conte
     assert res.body["usage"]["completion_tokens"] == n_predicted
     choice = res.body["choices"][0]
     assert "assistant" == choice["message"]["role"]
-    assert match_regex(re_content, choice["message"]["content"])
+    assert match_regex(re_content, choice["message"]["content"]), f'Expected {re_content}, got {choice["message"]["content"]}'
     assert choice["finish_reason"] == finish_reason
 
 
@@ -169,6 +171,47 @@ def test_completion_with_response_format(response_format: dict, n_predicted: int
         assert "error" in res.body
 
 
+@pytest.mark.parametrize("jinja,json_schema,n_predicted,re_content", [
+    (False, {"const": "42"}, 6, "\"42\""),
+    (True, {"const": "42"}, 6, "\"42\""),
+])
+def test_completion_with_json_schema(jinja: bool, json_schema: dict, n_predicted: int, re_content: str):
+    global server
+    server.jinja = jinja
+    server.start()
+    res = server.make_request("POST", "/chat/completions", data={
+        "max_tokens": n_predicted,
+        "messages": [
+            {"role": "system", "content": "You are a coding assistant."},
+            {"role": "user", "content": "Write an example"},
+        ],
+        "json_schema": json_schema,
+    })
+    assert res.status_code == 200, f'Expected 200, got {res.status_code}'
+    choice = res.body["choices"][0]
+    assert match_regex(re_content, choice["message"]["content"]), f'Expected {re_content}, got {choice["message"]["content"]}'
+
+
+@pytest.mark.parametrize("jinja,grammar,n_predicted,re_content", [
+    (False, 'root ::= "a"{5,5}', 6, "a{5,5}"),
+    (True, 'root ::= "a"{5,5}', 6, "a{5,5}"),
+])
+def test_completion_with_grammar(jinja: bool, grammar: str, n_predicted: int, re_content: str):
+    global server
+    server.jinja = jinja
+    server.start()
+    res = server.make_request("POST", "/chat/completions", data={
+        "max_tokens": n_predicted,
+        "messages": [
+            {"role": "user", "content": "Does not matter what I say, does it?"},
+        ],
+        "grammar": grammar,
+    })
+    assert res.status_code == 200, res.body
+    choice = res.body["choices"][0]
+    assert match_regex(re_content, choice["message"]["content"]), choice["message"]["content"]
+
+
 @pytest.mark.parametrize("messages", [
     None,
     "string",

@@ -10,17 +10,20 @@ def create_server():
     server = ServerPreset.jina_reranker_tiny()
 
 
+TEST_DOCUMENTS = [
+    "A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.",
+    "Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.",
+    "Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.",
+    "Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine."
+]
+
+
 def test_rerank():
     global server
     server.start()
     res = server.make_request("POST", "/rerank", data={
         "query": "Machine learning is",
-        "documents": [
-            "A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.",
-            "Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.",
-            "Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.",
-            "Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine."
-        ]
+        "documents": TEST_DOCUMENTS,
     })
     assert res.status_code == 200
     assert len(res.body["results"]) == 4
@@ -38,6 +41,29 @@ def test_rerank():
     assert least_relevant["index"] == 3
 
 
+def test_rerank_tei_format():
+    global server
+    server.start()
+    res = server.make_request("POST", "/rerank", data={
+        "query": "Machine learning is",
+        "texts": TEST_DOCUMENTS,
+    })
+    assert res.status_code == 200
+    assert len(res.body) == 4
+
+    most_relevant = res.body[0]
+    least_relevant = res.body[0]
+    for doc in res.body:
+        if doc["score"] > most_relevant["score"]:
+            most_relevant = doc
+        if doc["score"] < least_relevant["score"]:
+            least_relevant = doc
+
+    assert most_relevant["score"] > least_relevant["score"]
+    assert most_relevant["index"] == 2
+    assert least_relevant["index"] == 3
+
+
 @pytest.mark.parametrize("documents", [
     [],
     None,

@@ -356,12 +356,12 @@ def test_weather(hf_repo: str, template_override: str | Tuple[str, str | None] |
     (None,                                           128,  "bartowski/functionary-small-v3.2-GGUF:Q8_0",        ("meetkai/functionary-medium-v3.2", None)),
     (None,                                           128,  "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M",  None),
     (None,                                           128,  "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M",  None),
-    ("^> 0.56$",                                     128,  "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M",  "chatml"),
+    (None,                                           128,  "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M",  "chatml"),
     (None,                                           128,  "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M",       None),
 
     # TODO: fix these (wrong results, either didn't respect decimal instruction or got wrong value)
-    ("^The y-coordinate [\\s\\S]*?\\*\\*0.5\\*\\*",  8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
-    ("[\\s\\S]*?\\*\\*0\\.5\\*\\*",                  8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
+    ("[\\s\\S]*?\\*\\*\\s*0.5($|\\*\\*)",            8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
+    # ("[\\s\\S]*?\\*\\*\\s*0.5($|\\*\\*)",            8192, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
 ])
 def test_calc_result(result_override: str | None, n_predict: int, hf_repo: str, template_override: str | Tuple[str, str | None] | None):
     global server
@@ -401,7 +401,7 @@ def test_calc_result(result_override: str | None, n_predict: int, hf_repo: str,
             {
                 "role": "tool",
                 "name": "calculate",
-                "content": 0.55644242476,
+                "content": "0.55644242476",
                 "tool_call_id": "call_6789"
             }
         ],
@@ -444,7 +444,7 @@ def test_calc_result(result_override: str | None, n_predict: int, hf_repo: str,
     (128,  None,        "^The sum of 102 and 7 is 109.*",                       None,                                          "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M",       None),
 
     (1024, 'deepseek',  "To find the sum of.*",                                 "I need to calculate the sum of 102 and 7.*",  "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
-    (1024, 'none',      "<think>\n?I need[\\s\\S]*?</think>\n?To find.*",       None,                                          "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
+    (1024, 'none',      "^I need[\\s\\S]*?</think>\n?To find.*",                None,                                          "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
 
     (1024, 'deepseek',  "To find the sum of.*",                                 "First, I [\\s\\S]*",                          "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", ("llama-cpp-deepseek-r1", None)),
 ])

@@ -12,9 +12,7 @@
 // Change JSON_ASSERT from assert() to GGML_ASSERT:
 #define JSON_ASSERT GGML_ASSERT
 #include "json.hpp"
-#include "minja.hpp"
-#include "chat.hpp"
-#include "chat-template.hpp"
+#include "chat.h"
 
 #include <random>
 #include <sstream>
@@ -347,41 +345,6 @@ static llama_tokens format_infill(
     return embd_inp;
 }
 
-// Format given chat. If tmpl is empty, we take the template from model metadata
-inline std::string format_chat(const common_chat_template & tmpl, const std::vector<json> & messages) {
-    std::vector<common_chat_msg> chat;
-
-    for (size_t i = 0; i < messages.size(); ++i) {
-        const auto & curr_msg = messages[i];
-
-        std::string role = json_value(curr_msg, "role", std::string(""));
-
-        std::string content;
-        if (curr_msg.contains("content")) {
-            if (curr_msg["content"].is_string()) {
-                content = curr_msg["content"].get<std::string>();
-            } else if (curr_msg["content"].is_array()) {
-                for (const auto & part : curr_msg["content"]) {
-                    if (part.contains("text")) {
-                        content += "\n" + part["text"].get<std::string>();
-                    }
-                }
-            } else {
-                throw std::runtime_error("Invalid 'content' type (ref: https://github.com/ggml-org/llama.cpp/issues/8367)");
-            }
-        } else {
-            throw std::runtime_error("Missing 'content' (ref: https://github.com/ggml-org/llama.cpp/issues/8367)");
-        }
-
-        chat.push_back({role, content, /* tool_calls= */ {}});
-    }
-
-    const auto formatted_chat = common_chat_apply_template(tmpl, chat, true, /* use_jinja= */ false);
-    LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());
-
-    return formatted_chat;
-}
-
 //
 // base64 utils (TODO: move to common in the future)
 //
@@ -579,12 +542,9 @@ static json oaicompat_completion_params_parse(
     const json & body, /* openai api json semantics */
     bool use_jinja,
     common_reasoning_format reasoning_format,
-    const common_chat_templates & chat_templates)
+    const struct common_chat_templates * tmpls)
 {
     json llama_params;
-    const auto & tmpl = body.contains("tools") && chat_templates.template_tool_use
-        ? *chat_templates.template_tool_use
-        : *chat_templates.template_default;
 
     auto tools = json_value(body, "tools", json());
     auto stream = json_value(body, "stream", false);
@@ -610,62 +570,58 @@ static json oaicompat_completion_params_parse(
         llama_params["stop"] = json_value(body, "stop", json::array());
     }
 
+    auto json_schema = json_value(body, "json_schema", json());
+    auto grammar = json_value(body, "grammar", std::string());
+    if (!json_schema.is_null() && !grammar.empty()) {
+        throw std::runtime_error("Cannot use both json_schema and grammar");
+    }
+
     // Handle "response_format" field
     if (body.contains("response_format")) {
         json response_format      = json_value(body, "response_format", json::object());
         std::string response_type = json_value(response_format, "type", std::string());
         if (response_type == "json_object") {
-            llama_params["json_schema"] = json_value(response_format, "schema", json::object());
+            json_schema = json_value(response_format, "schema", json::object());
         } else if (response_type == "json_schema") {
             json json_schema = json_value(response_format, "json_schema", json::object());
-            llama_params["json_schema"] = json_value(json_schema, "schema", json::object());
+            json_schema = json_value(json_schema, "schema", json::object());
         } else if (!response_type.empty() && response_type != "text") {
             throw std::runtime_error("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
         }
     }
 
-    // Apply chat template to the list of messages
-    if (use_jinja) {
-        auto tool_choice = json_value(body, "tool_choice", std::string("auto"));
-        if (tool_choice != "none" && tool_choice != "auto" && tool_choice != "required") {
-            throw std::runtime_error("Invalid tool_choice: " + tool_choice);
-        }
-        if (tool_choice != "none" && llama_params.contains("grammar")) {
-            throw std::runtime_error("Cannot use custom grammar constraints with tools.");
-        }
-        common_chat_inputs inputs;
-        inputs.extract_reasoning   = reasoning_format != COMMON_REASONING_FORMAT_NONE;
-        inputs.messages            = body.at("messages");
-        inputs.tools               = tools;
-        inputs.tool_choice         = tool_choice;
-        inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false);
-        if (inputs.parallel_tool_calls && !tmpl.original_caps().supports_parallel_tool_calls) {
-            LOG_DBG("Disabling parallel_tool_calls because the template does not support it\n");
-            inputs.parallel_tool_calls = false;
-        }
-        inputs.stream = stream;
-        // TODO: support mixing schema w/ tools beyond generic format.
-        inputs.json_schema = json_value(llama_params, "json_schema", json());
-        auto chat_params = common_chat_params_init(tmpl, inputs);
+    common_chat_templates_inputs inputs;
+    inputs.messages              = common_chat_msgs_parse_oaicompat(body.at("messages"));
+    inputs.tools                 = common_chat_tools_parse_oaicompat(tools);
+    inputs.tool_choice           = common_chat_tool_choice_parse_oaicompat(json_value(body, "tool_choice", std::string("auto")));
+    inputs.json_schema           = json_schema.is_null() ? "" : json_schema.dump();
+    inputs.grammar               = grammar;
+    inputs.add_generation_prompt = true;
+    inputs.use_jinja             = use_jinja;
+    inputs.parallel_tool_calls   = json_value(body, "parallel_tool_calls", false);
+    inputs.extract_reasoning     = reasoning_format != COMMON_REASONING_FORMAT_NONE;
+    if (!inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE && body.contains("grammar")) {
+        throw std::runtime_error("Cannot use custom grammar constraints with tools.");
+    }
 
-        llama_params["chat_format"] = static_cast<int>(chat_params.format);
-        llama_params["prompt"] = chat_params.prompt;
-        llama_params["grammar"] = chat_params.grammar;
-        llama_params["grammar_lazy"] = chat_params.grammar_lazy;
-        auto grammar_triggers = json::array();
-        for (const auto & trigger : chat_params.grammar_triggers) {
-            grammar_triggers.push_back({
-                {"word", trigger.word},
-                {"at_start", trigger.at_start},
-            });
-        }
-        llama_params["grammar_triggers"] = grammar_triggers;
-        llama_params["preserved_tokens"] = chat_params.preserved_tokens;
-        for (const auto & stop : chat_params.additional_stops) {
-            llama_params["stop"].push_back(stop);
-        }
-    } else {
-        llama_params["prompt"] = format_chat(tmpl, body.at("messages"));
+    // Apply chat template to the list of messages
+    auto chat_params = common_chat_templates_apply(tmpls, inputs);
+
+    llama_params["chat_format"]      = static_cast<int>(chat_params.format);
+    llama_params["prompt"]           = chat_params.prompt;
+    llama_params["grammar"]          = chat_params.grammar;
+    llama_params["grammar_lazy"]     = chat_params.grammar_lazy;
+    auto grammar_triggers = json::array();
+    for (const auto & trigger : chat_params.grammar_triggers) {
+        grammar_triggers.push_back({
+            {"word", trigger.word},
+            {"at_start", trigger.at_start},
+        });
+    }
+    llama_params["grammar_triggers"] = grammar_triggers;
+    llama_params["preserved_tokens"] = chat_params.preserved_tokens;
+    for (const auto & stop : chat_params.additional_stops) {
+        llama_params["stop"].push_back(stop);
     }
 
     // Handle "n" field
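After this refactor, the parser reads a top-level "json_schema" or "grammar" field from the request body (besides the schema nested under "response_format") and rejects requests that set both. A small sketch of the two request payloads follows; the values are taken from the new tests added further down in this commit, so only the variable names are illustrative.

```python
# Two ways to constrain /chat/completions output after this change (mirrors the
# new tests in test_chat_completion.py below); sending both fields at once is
# rejected with "Cannot use both json_schema and grammar".
json_schema_payload = {
    "max_tokens": 6,
    "messages": [{"role": "user", "content": "Write an example"}],
    "json_schema": {"const": "42"},           # top-level JSON schema
}

grammar_payload = {
    "max_tokens": 6,
    "messages": [{"role": "user", "content": "Does not matter what I say, does it?"}],
    "grammar": 'root ::= "a"{5,5}',           # GBNF grammar string
}
```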
@@ -737,29 +693,51 @@ static json format_embeddings_response_oaicompat(const json & request, const jso
     return res;
 }
 
-static json format_response_rerank(const json & request, const json & ranks) {
-    json data = json::array();
-    int32_t n_tokens = 0;
-    int i = 0;
-    for (const auto & rank : ranks) {
-        data.push_back(json{
-            {"index",    i++},
-            {"relevance_score", json_value(rank, "score", 0.0)},
-        });
+static json format_response_rerank(
+        const json & request,
+        const json & ranks,
+        bool is_tei_format,
+        std::vector<std::string> & texts) {
+    json res;
+    if (is_tei_format) {
+        // TEI response format
+        res = json::array();
+        bool return_text = json_value(request, "return_text", false);
+        for (const auto & rank : ranks) {
+            int index = json_value(rank, "index", 0);
+            json elem = json{
+                {"index", index},
+                {"score", json_value(rank, "score", 0.0)},
+            };
+            if (return_text) {
+                elem["text"] = std::move(texts[index]);
+            }
+            res.push_back(elem);
+        }
+    } else {
+        // Jina response format
+        json results = json::array();
+        int32_t n_tokens = 0;
+        for (const auto & rank : ranks) {
+            results.push_back(json{
+                {"index",           json_value(rank, "index", 0)},
+                {"relevance_score", json_value(rank, "score", 0.0)},
+            });
 
-        n_tokens += json_value(rank, "tokens_evaluated", 0);
+            n_tokens += json_value(rank, "tokens_evaluated", 0);
+        }
+
+        res = json{
+            {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
+            {"object", "list"},
+            {"usage", json{
+                {"prompt_tokens", n_tokens},
+                {"total_tokens", n_tokens}
+            }},
+            {"results", results}
+        };
     }
 
-    json res = json {
-        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
-        {"object", "list"},
-        {"usage", json {
-            {"prompt_tokens", n_tokens},
-            {"total_tokens", n_tokens}
-        }},
-        {"results", data}
-    };
-
     return res;
 }

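For reference, a sketch of the two response shapes format_response_rerank now produces, written as Python literals with illustrative field values: the TEI branch returns a bare array (echoing each document only when "return_text" was set), while the Jina branch wraps the results in an object with token usage.

```python
# Illustrative response shapes for the two branches above (values are made up).
tei_response = [                      # TEI format: bare array, one entry per document
    {"index": 2, "score": 0.98, "text": "Machine learning is ..."},  # "text" only with return_text
    {"index": 0, "score": 0.12},
]

jina_response = {                     # Jina format: object with usage info and "results"
    "model": "some-model",
    "object": "list",
    "usage": {"prompt_tokens": 42, "total_tokens": 42},
    "results": [
        {"index": 2, "relevance_score": 0.98},
        {"index": 0, "relevance_score": 0.12},
    ],
}
```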
 
@@ -159,6 +159,35 @@ export default function ChatMessage({
                         </div>
                       </details>
                     )}
+
+                    {msg.extra && msg.extra.length > 0 && (
+                      <details
+                        className={classNames({
+                          'collapse collapse-arrow mb-4 bg-base-200': true,
+                          'bg-opacity-10': msg.role !== 'assistant',
+                        })}
+                      >
+                        <summary className="collapse-title">
+                          Extra content
+                        </summary>
+                        <div className="collapse-content">
+                          {msg.extra.map(
+                            (extra, i) =>
+                              extra.type === 'textFile' ? (
+                                <div key={extra.name}>
+                                  <b>{extra.name}</b>
+                                  <pre>{extra.content}</pre>
+                                </div>
+                              ) : extra.type === 'context' ? (
+                                <div key={i}>
+                                  <pre>{extra.content}</pre>
+                                </div>
+                              ) : null // TODO: support other extra types
+                          )}
+                        </div>
+                      </details>
+                    )}
+
                     <MarkdownDisplay
                       content={content}
                       isGenerating={isPending}

@@ -1,10 +1,11 @@
-import { useEffect, useMemo, useState } from 'react';
+import { useEffect, useMemo, useRef, useState } from 'react';
 import { CallbackGeneratedChunk, useAppContext } from '../utils/app.context';
 import ChatMessage from './ChatMessage';
 import { CanvasType, Message, PendingMessage } from '../utils/types';
 import { classNames, throttle } from '../utils/misc';
 import CanvasPyInterpreter from './CanvasPyInterpreter';
 import StorageUtils from '../utils/storage';
+import { useVSCodeContext } from '../utils/llama-vscode';
 
 /**
  * A message display is a message node with additional information for rendering.
@@ -81,6 +82,14 @@ export default function ChatScreen() {
     replaceMessageAndGenerate,
   } = useAppContext();
   const [inputMsg, setInputMsg] = useState('');
+  const inputRef = useRef<HTMLTextAreaElement>(null);
+
+  const { extraContext, clearExtraContext } = useVSCodeContext(
+    inputRef,
+    setInputMsg
+  );
+  // TODO: improve this when we have "upload file" feature
+  const currExtra: Message['extra'] = extraContext ? [extraContext] : undefined;
 
   // keep track of leaf node for rendering
   const [currNodeId, setCurrNodeId] = useState<number>(-1);
@@ -115,10 +124,20 @@ export default function ChatScreen() {
     setCurrNodeId(-1);
     // get the last message node
     const lastMsgNodeId = messages.at(-1)?.msg.id ?? null;
-    if (!(await sendMessage(currConvId, lastMsgNodeId, inputMsg, onChunk))) {
+    if (
+      !(await sendMessage(
+        currConvId,
+        lastMsgNodeId,
+        inputMsg,
+        currExtra,
+        onChunk
+      ))
+    ) {
       // restore the input message if failed
       setInputMsg(lastInpMsg);
     }
+    // OK
+    clearExtraContext();
   };
 
   const handleEditMessage = async (msg: Message, content: string) => {
@@ -129,6 +148,7 @@ export default function ChatScreen() {
       viewingChat.conv.id,
       msg.parent,
       content,
+      msg.extra,
       onChunk
     );
     setCurrNodeId(-1);
@@ -143,6 +163,7 @@ export default function ChatScreen() {
       viewingChat.conv.id,
       msg.parent,
       null,
+      msg.extra,
      onChunk
    );
    setCurrNodeId(-1);
@@ -203,6 +224,7 @@ export default function ChatScreen() {
           <textarea
             className="textarea textarea-bordered w-full"
             placeholder="Type a message (Shift+Enter to add a new line)"
+            ref={inputRef}
             value={inputMsg}
             onChange={(e) => setInputMsg(e.target.value)}
             onKeyDown={(e) => {

@@ -25,6 +25,7 @@ interface AppContextValue {
     convId: string | null,
     leafNodeId: Message['id'] | null,
     content: string,
+    extra: Message['extra'],
     onChunk: CallbackGeneratedChunk
   ) => Promise<boolean>;
   stopGenerating: (convId: string) => void;
@@ -32,6 +33,7 @@ interface AppContextValue {
     convId: string,
     parentNodeId: Message['id'], // the parent node of the message to be replaced
     content: string | null,
+    extra: Message['extra'],
     onChunk: CallbackGeneratedChunk
   ) => Promise<void>;
 
@@ -274,6 +276,7 @@ export const AppContextProvider = ({
     convId: string | null,
     leafNodeId: Message['id'] | null,
     content: string,
+    extra: Message['extra'],
     onChunk: CallbackGeneratedChunk
   ): Promise<boolean> => {
     if (isGenerating(convId ?? '') || content.trim().length === 0) return false;
@@ -298,6 +301,7 @@ export const AppContextProvider = ({
         convId,
         role: 'user',
         content,
+        extra,
         parent: leafNodeId,
         children: [],
       },
@@ -324,6 +328,7 @@ export const AppContextProvider = ({
     convId: string,
     parentNodeId: Message['id'], // the parent node of the message to be replaced
     content: string | null,
+    extra: Message['extra'],
     onChunk: CallbackGeneratedChunk
   ) => {
     if (isGenerating(convId)) return;
@@ -339,6 +344,7 @@ export const AppContextProvider = ({
           convId,
           role: 'user',
           content,
+          extra,
           parent: parentNodeId,
           children: [],
         },

examples/server/webui/src/utils/llama-vscode.ts (new file, 62 lines)
@@ -0,0 +1,62 @@
+import { useEffect, useState } from 'react';
+import { MessageExtraContext } from './types';
+
+// Extra context when using llama.cpp WebUI from llama-vscode, inside an iframe
+// Ref: https://github.com/ggml-org/llama.cpp/pull/11940
+
+interface SetTextEvData {
+  text: string;
+  context: string;
+}
+
+/**
+ * To test it:
+ * window.postMessage({ command: 'setText', text: 'Spot the syntax error', context: 'def test()\n  return 123' }, '*');
+ */
+
+export const useVSCodeContext = (
+  inputRef: React.RefObject<HTMLTextAreaElement>,
+  setInputMsg: (text: string) => void
+) => {
+  const [extraContext, setExtraContext] = useState<MessageExtraContext | null>(
+    null
+  );
+
+  // Accept setText message from a parent window and set inputMsg and extraContext
+  useEffect(() => {
+    const handleMessage = (event: MessageEvent) => {
+      if (event.data?.command === 'setText') {
+        const data: SetTextEvData = event.data;
+        setInputMsg(data?.text);
+        if (data?.context && data.context.length > 0) {
+          setExtraContext({
+            type: 'context',
+            content: data.context,
+          });
+        }
+        inputRef.current?.focus();
+      }
+    };
+
+    window.addEventListener('message', handleMessage);
+    return () => window.removeEventListener('message', handleMessage);
+  }, []);
+
+  // Add a keydown listener that sends the "escapePressed" message to the parent window
+  useEffect(() => {
+    const handleKeyDown = (event: KeyboardEvent) => {
+      if (event.key === 'Escape') {
+        window.parent.postMessage({ command: 'escapePressed' }, '*');
+      }
+    };
+
+    window.addEventListener('keydown', handleKeyDown);
+    return () => window.removeEventListener('keydown', handleKeyDown);
+  }, []);
+
+  return {
+    extraContext,
+    // call once the user message is sent, to clear the extra context
+    clearExtraContext: () => setExtraContext(null),
+  };
+};

@@ -53,12 +53,23 @@ export const copyStr = (textToCopy: string) => {
 
 /**
  * filter out redundant fields upon sending to API
+ * also format extra into text
  */
 export function normalizeMsgsForAPI(messages: Readonly<Message[]>) {
   return messages.map((msg) => {
+    let newContent = '';
+
+    for (const extra of msg.extra ?? []) {
+      if (extra.type === 'context') {
+        newContent += `${extra.content}\n\n`;
+      }
+    }
+
+    newContent += msg.content;
+
     return {
       role: msg.role,
-      content: msg.content,
+      content: newContent,
     };
   }) as APIMessage[];
 }

@@ -42,11 +42,25 @@ export interface Message {
   role: 'user' | 'assistant' | 'system';
   content: string;
   timings?: TimingReport;
+  extra?: MessageExtra[];
   // node based system for branching
   parent: Message['id'];
   children: Message['id'][];
 }
 
+type MessageExtra = MessageExtraTextFile | MessageExtraContext; // TODO: will add more in the future
+
+export interface MessageExtraTextFile {
+  type: 'textFile';
+  name: string;
+  content: string;
+}
+
+export interface MessageExtraContext {
+  type: 'context';
+  content: string;
+}
+
 export type APIMessage = Pick<Message, 'role' | 'content'>;
 
 export interface Conversation {