mirror of
https://github.com/ggml-org/llama.cpp.git
synced 2025-10-27 08:21:30 +00:00
webui : fix handling incomplete chunks (#16107)
This commit is contained in:
@@ -264,12 +264,14 @@ export class ChatService {
     let lastTimings: ChatMessageTimings | undefined;

     try {
+      let chunk = '';
       while (true) {
         const { done, value } = await reader.read();
         if (done) break;

-        const chunk = decoder.decode(value, { stream: true });
+        chunk += decoder.decode(value, { stream: true });
         const lines = chunk.split('\n');
+        chunk = lines.pop() || ''; // Save incomplete line for next read

         for (const line of lines) {
           if (line.startsWith('data: ')) {
Reference in New Issue
Block a user