Mirror of https://github.com/Mintplex-Labs/anything-llm.git
1417 completion timeout (#2374)
* Refactor handleDefaultStreamResponseV2 function for better error handling
* run yarn lint
* small error handling changes
* update error handling flow and scope of vars
* add back space

---------

Co-authored-by: Roman <rrojaski@gmail.com>
Commit 44dddcd4af (parent e6c4eb3f1c)
```diff
@@ -22,41 +22,55 @@ function handleDefaultStreamResponseV2(response, stream, responseProps) {
     const handleAbort = () => clientAbortedHandler(resolve, fullText);
     response.on("close", handleAbort);
 
-    for await (const chunk of stream) {
-      const message = chunk?.choices?.[0];
-      const token = message?.delta?.content;
+    // Now handle the chunks from the streamed response and append to fullText.
+    try {
+      for await (const chunk of stream) {
+        const message = chunk?.choices?.[0];
+        const token = message?.delta?.content;
 
-      if (token) {
-        fullText += token;
-        writeResponseChunk(response, {
-          uuid,
-          sources: [],
-          type: "textResponseChunk",
-          textResponse: token,
-          close: false,
-          error: false,
-        });
-      }
+        if (token) {
+          fullText += token;
+          writeResponseChunk(response, {
+            uuid,
+            sources: [],
+            type: "textResponseChunk",
+            textResponse: token,
+            close: false,
+            error: false,
+          });
+        }
 
-      // LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
-      // Either way, the key `finish_reason` must be present to determine ending chunk.
-      if (
-        message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
-        message.finish_reason !== "" &&
-        message.finish_reason !== null
-      ) {
-        writeResponseChunk(response, {
-          uuid,
-          sources,
-          type: "textResponseChunk",
-          textResponse: "",
-          close: true,
-          error: false,
-        });
-        response.removeListener("close", handleAbort);
-        resolve(fullText);
-        break; // Break streaming when a valid finish_reason is first encountered
-      }
-    }
+        // LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
+        // Either way, the key `finish_reason` must be present to determine ending chunk.
+        if (
+          message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
+          message.finish_reason !== "" &&
+          message.finish_reason !== null
+        ) {
+          writeResponseChunk(response, {
+            uuid,
+            sources,
+            type: "textResponseChunk",
+            textResponse: "",
+            close: true,
+            error: false,
+          });
+          response.removeListener("close", handleAbort);
+          resolve(fullText);
+          break; // Break streaming when a valid finish_reason is first encountered
+        }
+      }
+    } catch (e) {
+      console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
+      writeResponseChunk(response, {
+        uuid,
+        type: "abort",
+        textResponse: null,
+        sources: [],
+        close: true,
+        error: e.message,
+      });
+      resolve(fullText); // Return what we currently have - if anything.
+    }
   });
 }
```
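The pattern is easier to see in isolation. Below is a minimal sketch (not from the commit) of the failure mode this change guards against: an OpenAI-style async-iterable stream that throws mid-iteration, for example when the upstream completion times out. `fakeStream` and `collectStream` are hypothetical stand-ins for illustration only. Note that `fullText` is declared before the `try`, so the catch path can still resolve with whatever was collected, which matches the "scope of vars" point in the commit message.

```js
// Hypothetical stand-in for an OpenAI-style chunk stream that dies mid-response.
async function* fakeStream() {
  yield { choices: [{ delta: { content: "Hello, " } }] };
  yield { choices: [{ delta: { content: "world" } }] };
  // Simulates the upstream provider failing before ever sending a finish_reason.
  throw new Error("completion timed out");
}

// Illustrative reduction of the pattern in the diff: collect tokens, and on a
// mid-stream error resolve with the partial text instead of rejecting.
function collectStream(stream) {
  return new Promise(async (resolve) => {
    // Declared outside the try so the catch block can still reference it.
    let fullText = "";
    try {
      for await (const chunk of stream) {
        const token = chunk?.choices?.[0]?.delta?.content;
        if (token) fullText += token;
      }
      resolve(fullText);
    } catch (e) {
      console.log(`[STREAMING ERROR] ${e.message}`);
      resolve(fullText); // Return what we currently have - if anything.
    }
  });
}

// Resolves with "Hello, world" rather than hanging or rejecting.
collectStream(fakeStream()).then((text) => console.log(text));
```

Without the try/catch, an error thrown inside the `for await` loop escapes the async Promise executor, so the Promise never settles and the client request hangs, which is presumably the completion-timeout symptom the commit title refers to.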