Mirror of https://github.com/Mintplex-Labs/anything-llm.git, synced 2024-11-10 17:00:11 +01:00
[FEAT] Persist query mode refusal responses as chat history (#1727)
* log query refusals to workspace chats but hide in UI
* linting

Co-authored-by: timothycarambat <rambat1010@gmail.com>
parent 1d675d09fb
commit c2523a9593
@@ -7,6 +7,7 @@ const WorkspaceChats = {
     response = {},
     user = null,
     threadId = null,
+    include = true,
   }) {
     try {
       const chat = await prisma.workspace_chats.create({
@@ -16,6 +17,7 @@ const WorkspaceChats = {
         response: JSON.stringify(response),
         user_id: user?.id || null,
         thread_id: threadId,
+        include,
       },
     });
     return { chat, message: null };
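The new `include` column is the half of the feature that hides these rows: refusals are written with `include: false`, so any history fetch only has to filter on `include: true`. A minimal sketch of such a read path, assuming the same Prisma client; the helper name and exact filter shape are illustrative assumptions, not part of this diff:

// Sketch of an assumed read-side helper (not shown in this commit):
// return only the chats that should appear in the UI. Rows persisted
// with `include: false`, like the query-mode refusals below, are skipped.
forWorkspace: async function (workspaceId = null, limit = null) {
  try {
    return await prisma.workspace_chats.findMany({
      where: { workspaceId, include: true }, // hide include:false rows
      ...(limit !== null ? { take: limit } : {}),
      orderBy: { id: "asc" },
    });
  } catch (error) {
    console.error(error.message);
    return [];
  }
},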
@@ -77,15 +77,30 @@ async function chatWithWorkspace(
   // User is trying to query-mode chat a workspace that has no data in it - so
   // we should exit early as no information can be found under these conditions.
   if ((!hasVectorizedSpace || embeddingsCount === 0) && chatMode === "query") {
+    const textResponse =
+      workspace?.queryRefusalResponse ??
+      "There is no relevant information in this workspace to answer your query.";
+
+    await WorkspaceChats.new({
+      workspaceId: workspace.id,
+      prompt: message,
+      response: {
+        text: textResponse,
+        sources: [],
+        type: chatMode,
+      },
+      threadId: thread?.id || null,
+      include: false,
+      user,
+    });
+
     return {
       id: uuid,
       type: "textResponse",
       sources: [],
       close: true,
       error: null,
-      textResponse:
-        workspace?.queryRefusalResponse ??
-        "There is no relevant information in this workspace to answer your query.",
+      textResponse,
     };
   }
@@ -172,15 +187,30 @@ async function chatWithWorkspace(
   // If in query mode and no context chunks are found from search, backfill, or pins - do not
   // let the LLM try to hallucinate a response or use general knowledge and exit early
   if (chatMode === "query" && contextTexts.length === 0) {
+    const textResponse =
+      workspace?.queryRefusalResponse ??
+      "There is no relevant information in this workspace to answer your query.";
+
+    await WorkspaceChats.new({
+      workspaceId: workspace.id,
+      prompt: message,
+      response: {
+        text: textResponse,
+        sources: [],
+        type: chatMode,
+      },
+      threadId: thread?.id || null,
+      include: false,
+      user,
+    });
+
     return {
       id: uuid,
       type: "textResponse",
       sources: [],
       close: true,
       error: null,
-      textResponse:
-        workspace?.queryRefusalResponse ??
-        "There is no relevant information in this workspace to answer your query.",
+      textResponse,
     };
   }
@@ -75,16 +75,29 @@ async function streamChatWithWorkspace(
   // User is trying to query-mode chat a workspace that has no data in it - so
   // we should exit early as no information can be found under these conditions.
   if ((!hasVectorizedSpace || embeddingsCount === 0) && chatMode === "query") {
+    const textResponse =
+      workspace?.queryRefusalResponse ??
+      "There is no relevant information in this workspace to answer your query.";
     writeResponseChunk(response, {
       id: uuid,
       type: "textResponse",
-      textResponse:
-        workspace?.queryRefusalResponse ??
-        "There is no relevant information in this workspace to answer your query.",
+      textResponse,
       sources: [],
       close: true,
       error: null,
     });
+    await WorkspaceChats.new({
+      workspaceId: workspace.id,
+      prompt: message,
+      response: {
+        text: textResponse,
+        sources: [],
+        type: chatMode,
+      },
+      threadId: thread?.id || null,
+      include: false,
+      user,
+    });
     return;
   }
@@ -177,16 +190,30 @@ async function streamChatWithWorkspace(
   // If in query mode and no context chunks are found from search, backfill, or pins - do not
   // let the LLM try to hallucinate a response or use general knowledge and exit early
   if (chatMode === "query" && contextTexts.length === 0) {
+    const textResponse =
+      workspace?.queryRefusalResponse ??
+      "There is no relevant information in this workspace to answer your query.";
     writeResponseChunk(response, {
       id: uuid,
       type: "textResponse",
-      textResponse:
-        workspace?.queryRefusalResponse ??
-        "There is no relevant information in this workspace to answer your query.",
+      textResponse,
       sources: [],
       close: true,
       error: null,
     });
+
+    await WorkspaceChats.new({
+      workspaceId: workspace.id,
+      prompt: message,
+      response: {
+        text: textResponse,
+        sources: [],
+        type: chatMode,
+      },
+      threadId: thread?.id || null,
+      include: false,
+      user,
+    });
     return;
   }
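In both streaming paths above, the refusal reaches the client through `writeResponseChunk` before the hidden chat record is persisted. That helper is not part of this diff; assuming the server streams chat responses as server-sent events, a plausible minimal implementation would be:

// Sketch of an assumed helper (not shown in this commit): emit one JSON
// payload as a server-sent event. The refusal above arrives as a single
// chunk whose `close: true` tells the client the stream is finished.
function writeResponseChunk(response, data) {
  response.write(`data: ${JSON.stringify(data)}\n\n`);
  return;
}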