diff --git a/server/endpoints/chat.js b/server/endpoints/chat.js index de2a4b45..b0afed76 100644 --- a/server/endpoints/chat.js +++ b/server/endpoints/chat.js @@ -64,11 +64,15 @@ function chatEndpoints(app) { } const result = await chatWithWorkspace(workspace, message, mode, user); - await Telemetry.sendTelemetry("sent_chat", { - multiUserMode: multiUserMode(response), - LLMSelection: process.env.LLM_PROVIDER || "openai", - VectorDbSelection: process.env.VECTOR_DB || "pinecone", - }); + await Telemetry.sendTelemetry( + "sent_chat", + { + multiUserMode: multiUserMode(response), + LLMSelection: process.env.LLM_PROVIDER || "openai", + VectorDbSelection: process.env.VECTOR_DB || "pinecone", + }, + user?.id + ); response.status(200).json({ ...result }); } catch (e) { console.error(e); diff --git a/server/endpoints/system.js b/server/endpoints/system.js index 202ba376..b6868ec1 100644 --- a/server/endpoints/system.js +++ b/server/endpoints/system.js @@ -134,6 +134,11 @@ function systemEndpoints(app) { return; } + await Telemetry.sendTelemetry( + "login_event", + { multiUserMode: true }, + existingUser?.id + ); response.status(200).json({ valid: true, user: existingUser, @@ -155,6 +160,7 @@ function systemEndpoints(app) { return; } + await Telemetry.sendTelemetry("login_event", { multiUserMode: false }); response.status(200).json({ valid: true, token: makeJWT({ p: password }, "30d"), @@ -325,7 +331,9 @@ function systemEndpoints(app) { true ); if (process.env.NODE_ENV === "production") await dumpENV(); - await Telemetry.sendTelemetry("enabled_multi_user_mode"); + await Telemetry.sendTelemetry("enabled_multi_user_mode", { + multiUserMode: true, + }); response.status(200).json({ success: !!user, error }); } catch (e) { await User.delete({}); diff --git a/server/endpoints/workspaces.js b/server/endpoints/workspaces.js index 51ffc23e..c32eb475 100644 --- a/server/endpoints/workspaces.js +++ b/server/endpoints/workspaces.js @@ -23,11 +23,15 @@ function workspaceEndpoints(app)
{ const user = await userFromSession(request, response); const { name = null, onboardingComplete = false } = reqBody(request); const { workspace, message } = await Workspace.new(name, user?.id); - await Telemetry.sendTelemetry("workspace_created", { - multiUserMode: multiUserMode(response), - LLMSelection: process.env.LLM_PROVIDER || "openai", - VectorDbSelection: process.env.VECTOR_DB || "pinecone", - }); + await Telemetry.sendTelemetry( + "workspace_created", + { + multiUserMode: multiUserMode(response), + LLMSelection: process.env.LLM_PROVIDER || "openai", + VectorDbSelection: process.env.VECTOR_DB || "pinecone", + }, + user?.id + ); if (onboardingComplete === true) await Telemetry.sendTelemetry("onboarding_complete"); diff --git a/server/models/telemetry.js b/server/models/telemetry.js index 19c47f7a..d1f0b038 100644 --- a/server/models/telemetry.js +++ b/server/models/telemetry.js @@ -28,12 +28,14 @@ const Telemetry = { return new PostHog(this.pubkey); }, - sendTelemetry: async function (event, properties = {}) { + sendTelemetry: async function (event, properties = {}, subUserId = null) { try { - const { client, distinctId } = await this.connect(); + const { client, distinctId: systemId } = await this.connect(); if (!client) return; + const distinctId = !!subUserId ? `${systemId}::${subUserId}` : systemId; console.log(`\x1b[32m[TELEMETRY SENT]\x1b[0m`, { event, + distinctId, properties, }); client.capture({