llama : improve llama_batch API + simplify parallel example

This commit is contained in:
Georgi Gerganov
2023-09-20 10:46:18 +03:00
parent a1327c71c6
commit addae65fd4
6 changed files with 111 additions and 70 deletions

View File

@@ -10,10 +10,12 @@ int main(int argc, char ** argv) {
gpt_params params;
if (argc == 1 || argv[1][0] == '-') {
printf("usage: %s MODEL_PATH [PROMPT]\n", argv[0]);
printf("usage: %s MODEL_PATH [PROMPT] [PARALLEL]\n", argv[0]);
return 1;
}
int n_parallel = 1;
if (argc >= 2) {
params.model = argv[1];
}
@@ -22,6 +24,10 @@ int main(int argc, char ** argv) {
params.prompt = argv[2];
}
if (argc >= 4) {
n_parallel = std::atoi(argv[3]);
}
if (params.prompt.empty()) {
params.prompt = "Hello my name is";
}