diff --git a/server/utils/helpers/chat/index.js b/server/utils/helpers/chat/index.js
index ed7eab90..b969201e 100644
--- a/server/utils/helpers/chat/index.js
+++ b/server/utils/helpers/chat/index.js
@@ -300,7 +300,7 @@ function cannonball({
   // if the delta is the token difference between where our prompt is in size
   // and where we ideally need to land.
   const delta = initialInputSize - targetTokenSize;
-  const tokenChunks = tokenManager.tokensFromString(input);
+  const tokenChunks = tokenManager.countFromString(input);
   const middleIdx = Math.floor(tokenChunks.length / 2);
 
   // middle truncate the text going left and right of midpoint
diff --git a/server/utils/helpers/tiktoken.js b/server/utils/helpers/tiktoken.js
index ad1cdd44..13475549 100644
--- a/server/utils/helpers/tiktoken.js
+++ b/server/utils/helpers/tiktoken.js
@@ -3,12 +3,11 @@ const { getEncodingNameForModel, getEncoding } = require("js-tiktoken");
 class TokenManager {
   constructor(model = "gpt-3.5-turbo") {
     this.model = model;
-    this.encoderName = this.getEncodingFromModel(model);
+    this.encoderName = this.#getEncodingFromModel(model);
     this.encoder = getEncoding(this.encoderName);
-    this.buffer = 50;
   }
 
-  getEncodingFromModel(model) {
+  #getEncodingFromModel(model) {
     try {
       return getEncodingNameForModel(model);
     } catch {
@@ -16,18 +15,15 @@ class TokenManager {
     }
   }
 
-  tokensFromString(input = "") {
-    const tokens = this.encoder.encode(input);
-    return tokens;
-  }
-
   bytesFromTokens(tokens = []) {
     const bytes = this.encoder.decode(tokens);
     return bytes;
   }
 
+  // Pass in an empty array of disallowedSpecials to handle all tokens as text and to be tokenized.
+  // https://github.com/openai/tiktoken/blob/9e79899bc248d5313c7dd73562b5e211d728723d/tiktoken/core.py#L91C20-L91C38
   countFromString(input = "") {
-    const tokens = this.encoder.encode(input);
+    const tokens = this.encoder.encode(input, undefined, []);
     return tokens.length;
   }
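
For reviewers, here is a minimal standalone sketch (not part of the diff above) of what the empty `disallowedSpecial` array changes. It assumes `js-tiktoken` is installed; the `cl100k_base` encoding and the `<|endoftext|>` sample string are purely illustrative. By default `encode()` throws when the input contains a special-token string, while `encode(input, undefined, [])` tokenizes it as ordinary text, which is the behavior the patched `countFromString` relies on.

```js
// Sketch only: demonstrates the disallowedSpecial behavior the new comment
// in tiktoken.js describes. Encoding name and sample string are arbitrary.
const { getEncoding } = require("js-tiktoken");

const encoder = getEncoding("cl100k_base");
const input = "Hello <|endoftext|> world";

// Default call: disallowedSpecial defaults to "all", so any special-token
// string in the input raises an error instead of being counted.
try {
  encoder.encode(input);
} catch (e) {
  console.error("default encode() rejected a special token:", e.message);
}

// Patched call: an empty disallowedSpecial list means nothing is rejected;
// "<|endoftext|>" is split into ordinary BPE tokens like any other text.
const tokens = encoder.encode(input, undefined, []);
console.log(`counted ${tokens.length} tokens without throwing`);
```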