mirror of
				https://github.com/ggml-org/llama.cpp.git
				synced 2025-11-04 09:32:00 +00:00 
			
		
		
		
	Main+: optionally allow special tokens from user in interactive mode (#7097)
@hanishkvc added a new `--interactive-specials` flag which allows inserting special tokens from the user's input into the embedding stream while in interactive mode.
This commit is contained in:
		@@ -901,6 +901,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
 | 
				
			|||||||
        params.interactive = true;
 | 
					        params.interactive = true;
 | 
				
			||||||
        return true;
 | 
					        return true;
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					    if (arg == "--interactive-specials") {
 | 
				
			||||||
 | 
					        params.interactive_specials = true;
 | 
				
			||||||
 | 
					        return true;
 | 
				
			||||||
 | 
					    }
 | 
				
			||||||
    if (arg == "--embedding") {
 | 
					    if (arg == "--embedding") {
 | 
				
			||||||
        params.embedding = true;
 | 
					        params.embedding = true;
 | 
				
			||||||
        return true;
 | 
					        return true;
 | 
				
			||||||
@@ -1422,6 +1426,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
 | 
				
			|||||||
    printf("  -h, --help            show this help message and exit\n");
 | 
					    printf("  -h, --help            show this help message and exit\n");
 | 
				
			||||||
    printf("  --version             show version and build info\n");
 | 
					    printf("  --version             show version and build info\n");
 | 
				
			||||||
    printf("  -i, --interactive     run in interactive mode\n");
 | 
					    printf("  -i, --interactive     run in interactive mode\n");
 | 
				
			||||||
 | 
					    printf("  --interactive-specials allow special tokens in user text, in interactive mode\n");
 | 
				
			||||||
    printf("  --interactive-first   run in interactive mode and wait for input right away\n");
 | 
					    printf("  --interactive-first   run in interactive mode and wait for input right away\n");
 | 
				
			||||||
    printf("  -cnv, --conversation  run in conversation mode (does not print special tokens and suffix/prefix)\n");
 | 
					    printf("  -cnv, --conversation  run in conversation mode (does not print special tokens and suffix/prefix)\n");
 | 
				
			||||||
    printf("  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
 | 
					    printf("  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
 | 
				
			||||||
@@ -2652,6 +2657,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
 | 
				
			|||||||
    dump_string_yaml_multiline(stream, "in_suffix", params.input_prefix.c_str());
 | 
					    dump_string_yaml_multiline(stream, "in_suffix", params.input_prefix.c_str());
 | 
				
			||||||
    fprintf(stream, "instruct: %s # default: false\n", params.instruct ? "true" : "false");
 | 
					    fprintf(stream, "instruct: %s # default: false\n", params.instruct ? "true" : "false");
 | 
				
			||||||
    fprintf(stream, "interactive: %s # default: false\n", params.interactive ? "true" : "false");
 | 
					    fprintf(stream, "interactive: %s # default: false\n", params.interactive ? "true" : "false");
 | 
				
			||||||
 | 
					    fprintf(stream, "interactive_specials: %s # default: false\n", params.interactive_specials ? "true" : "false");
 | 
				
			||||||
    fprintf(stream, "interactive_first: %s # default: false\n", params.interactive_first ? "true" : "false");
 | 
					    fprintf(stream, "interactive_first: %s # default: false\n", params.interactive_first ? "true" : "false");
 | 
				
			||||||
    fprintf(stream, "keep: %d # default: 0\n", params.n_keep);
 | 
					    fprintf(stream, "keep: %d # default: 0\n", params.n_keep);
 | 
				
			||||||
    fprintf(stream, "logdir: %s # default: unset (no logging)\n", params.logdir.c_str());
 | 
					    fprintf(stream, "logdir: %s # default: unset (no logging)\n", params.logdir.c_str());
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -140,6 +140,7 @@ struct gpt_params {
 | 
				
			|||||||
    bool random_prompt     = false; // do not randomize prompt if none provided
 | 
					    bool random_prompt     = false; // do not randomize prompt if none provided
 | 
				
			||||||
    bool use_color         = false; // use color to distinguish generations and inputs
 | 
					    bool use_color         = false; // use color to distinguish generations and inputs
 | 
				
			||||||
    bool interactive       = false; // interactive mode
 | 
					    bool interactive       = false; // interactive mode
 | 
				
			||||||
 | 
					    bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
 | 
				
			||||||
    bool conversation      = false; // conversation mode (does not print special tokens and suffix/prefix)
 | 
					    bool conversation      = false; // conversation mode (does not print special tokens and suffix/prefix)
 | 
				
			||||||
    bool chatml            = false; // chatml mode (used for models trained on chatml syntax)
 | 
					    bool chatml            = false; // chatml mode (used for models trained on chatml syntax)
 | 
				
			||||||
    bool prompt_cache_all  = false; // save user input and generations to prompt cache
 | 
					    bool prompt_cache_all  = false; // save user input and generations to prompt cache
 | 
				
			||||||
 
 | 
				
			|||||||
@@ -879,7 +879,7 @@ int main(int argc, char ** argv) {
 | 
				
			|||||||
                    }
 | 
					                    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
                    const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
 | 
					                    const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
 | 
				
			||||||
                    const auto line_inp = ::llama_tokenize(ctx, buffer,              false, false);
 | 
					                    const auto line_inp = ::llama_tokenize(ctx, buffer,              false, params.interactive_specials);
 | 
				
			||||||
                    const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
 | 
					                    const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
 | 
					                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
 | 
				
			||||||
 
 | 
				
			|||||||
		Reference in New Issue
	
	Block a user