From 3f78ef413be9e3cf51770a05c3d8ea4eeb644ab6 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Tue, 28 May 2024 17:17:35 -0700 Subject: [PATCH 01/12] [FEAT] Support for gemini-1.0-pro model and fixes to prompt window limit (#1557) support for gemini-1.0-pro model and fixes to prompt window limit --- .../src/components/LLMSelection/GeminiLLMOptions/index.jsx | 1 + frontend/src/hooks/useGetProvidersModels.js | 2 +- server/utils/AiProviders/gemini/index.js | 5 +++++ server/utils/helpers/updateENV.js | 1 + 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx b/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx index 87e05882..cc25ae95 100644 --- a/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx +++ b/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx @@ -32,6 +32,7 @@ export default function GeminiLLMOptions({ settings }) { > {[ "gemini-pro", + "gemini-1.0-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest", ].map((model) => { diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js index 6687f0a7..d61d4c36 100644 --- a/frontend/src/hooks/useGetProvidersModels.js +++ b/frontend/src/hooks/useGetProvidersModels.js @@ -10,7 +10,7 @@ export const DISABLED_PROVIDERS = [ ]; const PROVIDER_DEFAULT_MODELS = { openai: [], - gemini: ["gemini-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest"], + gemini: ["gemini-pro","gemini-1.0-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest"], anthropic: [ "claude-instant-1.2", "claude-2.0", diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js index 30c9ffa3..ef184580 100644 --- a/server/utils/AiProviders/gemini/index.js +++ b/server/utils/AiProviders/gemini/index.js @@ -91,6 +91,10 @@ class GeminiLLM { switch (this.model) { case "gemini-pro": return 30_720; + case "gemini-1.0-pro": + return 30_720; + case "gemini-1.5-flash-latest": + return 1_048_576; case "gemini-1.5-pro-latest": return 1_048_576; default: @@ -101,6 +105,7 @@ class GeminiLLM { isValidChatCompletionModel(modelName = "") { const validModels = [ "gemini-pro", + "gemini-1.0-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest", ]; diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index d6900ae5..d5cdc68f 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -532,6 +532,7 @@ function supportedTranscriptionProvider(input = "") { function validGeminiModel(input = "") { const validModels = [ "gemini-pro", + "gemini-1.0-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest", ]; From 4324a8bb4f5b50221b58bc646c8dc0d342b0dc8a Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Wed, 29 May 2024 02:01:29 -0700 Subject: [PATCH 02/12] [FEAT] Github repo loader bug fix (#1558) * fix project names with special characters for github repo data connector * linting --- collector/utils/extensions/GithubRepo/RepoLoader/index.js | 6 +++++- frontend/src/hooks/useGetProvidersModels.js | 7 ++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/collector/utils/extensions/GithubRepo/RepoLoader/index.js b/collector/utils/extensions/GithubRepo/RepoLoader/index.js index dbe26fa2..c842f621 100644 --- a/collector/utils/extensions/GithubRepo/RepoLoader/index.js +++ b/collector/utils/extensions/GithubRepo/RepoLoader/index.js @@ -14,7 +14,11 @@ class RepoLoader { #validGithubUrl() { const UrlPattern = require("url-pattern"); 
const pattern = new UrlPattern( - "https\\://github.com/(:author)/(:project(*))" + "https\\://github.com/(:author)/(:project(*))", + { + // fixes project names with special characters (.github) + segmentValueCharset: "a-zA-Z0-9-._~%/+", + } ); const match = pattern.match(this.repo); if (!match) return false; diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js index d61d4c36..e0a27699 100644 --- a/frontend/src/hooks/useGetProvidersModels.js +++ b/frontend/src/hooks/useGetProvidersModels.js @@ -10,7 +10,12 @@ export const DISABLED_PROVIDERS = [ ]; const PROVIDER_DEFAULT_MODELS = { openai: [], - gemini: ["gemini-pro","gemini-1.0-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest"], + gemini: [ + "gemini-pro", + "gemini-1.0-pro", + "gemini-1.5-pro-latest", + "gemini-1.5-flash-latest", + ], anthropic: [ "claude-instant-1.2", "claude-2.0", From 9a38b32c746b790531975d27888fea5552050062 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Thu, 30 May 2024 22:52:00 -0700 Subject: [PATCH 03/12] [FEAT] Add support for R files to be parsed as text (#1577) add support for R files to be parsed as text --- collector/utils/files/mime.js | 1 + 1 file changed, 1 insertion(+) diff --git a/collector/utils/files/mime.js b/collector/utils/files/mime.js index 635a6aa3..5b3f9574 100644 --- a/collector/utils/files/mime.js +++ b/collector/utils/files/mime.js @@ -35,6 +35,7 @@ class MimeDetector { "js", "lua", "pas", + "r", ], }, true From 8a4dd2bdf5b2dfe986e91e5693881b2c1cc368c6 Mon Sep 17 00:00:00 2001 From: Chris Daniel Date: Mon, 3 Jun 2024 05:01:41 -0400 Subject: [PATCH 04/12] [FEAT] add support for TSX files to be parsed as text (#1597) add support for TSX files to be parsed as text --- collector/utils/files/mime.js | 1 + 1 file changed, 1 insertion(+) diff --git a/collector/utils/files/mime.js b/collector/utils/files/mime.js index 5b3f9574..6cd88f82 100644 --- a/collector/utils/files/mime.js +++ b/collector/utils/files/mime.js @@ -23,6 +23,7 @@ class MimeDetector { { "text/plain": [ "ts", + "tsx", "py", "opts", "lock", From 0055de86e3acd72fbf585dda06b76f52050ce0e4 Mon Sep 17 00:00:00 2001 From: Kumar Shivendu Date: Mon, 3 Jun 2024 21:30:47 +0530 Subject: [PATCH 05/12] docs: Typo in Qdrant's name (#1598) fix: Typo in Qdrant's name --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index dfedb4f9..178473e1 100644 --- a/README.md +++ b/README.md @@ -123,7 +123,7 @@ Some cool features of AnythingLLM - [Pinecone](https://pinecone.io) - [Chroma](https://trychroma.com) - [Weaviate](https://weaviate.io) -- [QDrant](https://qdrant.tech) +- [Qdrant](https://qdrant.tech) - [Milvus](https://milvus.io) - [Zilliz](https://zilliz.com) From dfcf32e9c06101aec20e1ca0a842b0eb4f21c293 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Fri, 7 Jun 2024 04:13:15 +0900 Subject: [PATCH 06/12] docs: add Japanese README (#1574) * docs: add Japanese README * docs: update README.md --- README.ja-JP.md | 235 ++++++++++++++++++++++++++++++++++++++++++++++++ README.md | 2 +- 2 files changed, 236 insertions(+), 1 deletion(-) create mode 100644 README.ja-JP.md diff --git a/README.ja-JP.md b/README.ja-JP.md new file mode 100644 index 00000000..ae587373 --- /dev/null +++ b/README.ja-JP.md @@ -0,0 +1,235 @@ + + +

+ AnythingLLM logo +

+ +
+Mintplex-Labs%2Fanything-llm | Trendshift +
+ +

+ AnythingLLM: あなたが探していたオールインワンAIアプリ。
+ ドキュメントとチャットし、AIエージェントを使用し、高度にカスタマイズ可能で、複数ユーザー対応、面倒な設定は不要です。 +

+ +

+ + Discord + | + + ライセンス + | + + ドキュメント + | + + ホストされたインスタンス + +

+ +

+ English · 简体中文 · 日本語 +

+ +

+👉 デスクトップ用AnythingLLM(Mac、Windows、Linux対応)!今すぐダウンロード +

+ +これは、任意のドキュメント、リソース、またはコンテンツの断片を、チャット中にLLMが参照として使用できるコンテキストに変換できるフルスタックアプリケーションです。このアプリケーションを使用すると、使用するLLMまたはベクトルデータベースを選択し、マルチユーザー管理と権限をサポートできます。 + +![チャット](https://github.com/Mintplex-Labs/anything-llm/assets/16845892/cfc5f47c-bd91-4067-986c-f3f49621a859) + +
+デモを見る! + +[![ビデオを見る](/images/youtube.png)](https://youtu.be/f95rGD9trL0) + +
+ +### 製品概要 + +AnythingLLMは、市販のLLMや人気のあるオープンソースLLM、およびベクトルDBソリューションを使用して、妥協のないプライベートChatGPTを構築できるフルスタックアプリケーションです。ローカルで実行することも、リモートでホストすることもでき、提供されたドキュメントと知的にチャットできます。 + +AnythingLLMは、ドキュメントを`ワークスペース`と呼ばれるオブジェクトに分割します。ワークスペースはスレッドのように機能しますが、ドキュメントのコンテナ化が追加されています。ワークスペースはドキュメントを共有できますが、互いに通信することはないため、各ワークスペースのコンテキストをクリーンに保つことができます。 + +AnythingLLMのいくつかのクールな機能 + +- **マルチユーザーインスタンスのサポートと権限付与** +- ワークスペース内のエージェント(ウェブを閲覧、コードを実行など) +- [ウェブサイト用のカスタム埋め込み可能なチャットウィジェット](./embed/README.md) +- 複数のドキュメントタイプのサポート(PDF、TXT、DOCXなど) +- シンプルなUIからベクトルデータベース内のドキュメントを管理 +- 2つのチャットモード`会話`と`クエリ`。会話は以前の質問と修正を保持します。クエリはドキュメントに対するシンプルなQAです +- チャット中の引用 +- 100%クラウドデプロイメント対応。 +- 「独自のLLMを持参」モデル。 +- 大規模なドキュメントを管理するための非常に効率的なコスト削減策。巨大なドキュメントやトランスクリプトを埋め込むために一度以上支払うことはありません。他のドキュメントチャットボットソリューションよりも90%コスト効率が良いです。 +- カスタム統合のための完全な開発者API! + +### サポートされているLLM、埋め込みモデル、音声モデル、およびベクトルデータベース + +**言語学習モデル:** + +- [llama.cpp互換の任意のオープンソースモデル](/server/storage/models/README.md#text-generation-llm-selection) +- [OpenAI](https://openai.com) +- [OpenAI (汎用)](https://openai.com) +- [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) +- [Anthropic](https://www.anthropic.com/) +- [Google Gemini Pro](https://ai.google.dev/) +- [Hugging Face (チャットモデル)](https://huggingface.co/) +- [Ollama (チャットモデル)](https://ollama.ai/) +- [LM Studio (すべてのモデル)](https://lmstudio.ai) +- [LocalAi (すべてのモデル)](https://localai.io/) +- [Together AI (チャットモデル)](https://www.together.ai/) +- [Perplexity (チャットモデル)](https://www.perplexity.ai/) +- [OpenRouter (チャットモデル)](https://openrouter.ai/) +- [Mistral](https://mistral.ai/) +- [Groq](https://groq.com/) +- [Cohere](https://cohere.com/) +- [KoboldCPP](https://github.com/LostRuins/koboldcpp) + +**埋め込みモデル:** + +- [AnythingLLMネイティブ埋め込み](/server/storage/models/README.md)(デフォルト) +- [OpenAI](https://openai.com) +- [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) +- [LocalAi (すべて)](https://localai.io/) +- [Ollama (すべて)](https://ollama.ai/) +- [LM Studio (すべて)](https://lmstudio.ai) +- [Cohere](https://cohere.com/) + +**音声変換モデル:** + +- [AnythingLLM内蔵](https://github.com/Mintplex-Labs/anything-llm/tree/master/server/storage/models#audiovideo-transcription)(デフォルト) +- [OpenAI](https://openai.com/) + +**TTS(テキストから音声へ)サポート:** + +- ネイティブブラウザ内蔵(デフォルト) +- [OpenAI TTS](https://platform.openai.com/docs/guides/text-to-speech/voice-options) +- [ElevenLabs](https://elevenlabs.io/) + +**STT(音声からテキストへ)サポート:** + +- ネイティブブラウザ内蔵(デフォルト) + +**ベクトルデータベース:** + +- [LanceDB](https://github.com/lancedb/lancedb)(デフォルト) +- [Astra DB](https://www.datastax.com/products/datastax-astra) +- [Pinecone](https://pinecone.io) +- [Chroma](https://trychroma.com) +- [Weaviate](https://weaviate.io) +- [QDrant](https://qdrant.tech) +- [Milvus](https://milvus.io) +- [Zilliz](https://zilliz.com) + +### 技術概要 + +このモノレポは、主に3つのセクションで構成されています: + +- `frontend`: LLMが使用できるすべてのコンテンツを簡単に作成および管理できるviteJS + Reactフロントエンド。 +- `server`: すべてのインタラクションを処理し、すべてのベクトルDB管理およびLLMインタラクションを行うNodeJS expressサーバー。 +- `collector`: UIからドキュメントを処理および解析するNodeJS expressサーバー。 +- `docker`: Dockerの指示およびビルドプロセス + ソースからのビルド情報。 +- `embed`: [埋め込みウィジェット](./embed/README.md)の生成に特化したコード。 + +## 🛳 セルフホスティング + +Mintplex Labsおよびコミュニティは、AnythingLLMをローカルで実行できる多数のデプロイメント方法、スクリプト、テンプレートを維持しています。以下の表を参照して、お好みの環境でのデプロイ方法を読むか、自動デプロイを行ってください。 +| Docker | AWS | GCP | Digital Ocean | Render.com | +|----------------------------------------|----:|-----|---------------|------------| +| [![Docker上でデプロイ][docker-btn]][docker-deploy] | [![AWS上でデプロイ][aws-btn]][aws-deploy] | 
[![GCP上でデプロイ][gcp-btn]][gcp-deploy] | [![DigitalOcean上でデプロイ][do-btn]][do-deploy] | [![Render.com上でデプロイ][render-btn]][render-deploy] | + +| Railway | +| --------------------------------------------------- | +| [![Railway上でデプロイ][railway-btn]][railway-deploy] | + +[Dockerを使用せずに本番環境のAnythingLLMインスタンスを設定する →](./BARE_METAL.md) + +## 開発環境のセットアップ方法 + +- `yarn setup` 各アプリケーションセクションに必要な`.env`ファイルを入力します(リポジトリのルートから)。 + - 次に進む前にこれらを入力してください。`server/.env.development`が入力されていないと正しく動作しません。 +- `yarn dev:server` ローカルでサーバーを起動します(リポジトリのルートから)。 +- `yarn dev:frontend` ローカルでフロントエンドを起動します(リポジトリのルートから)。 +- `yarn dev:collector` ドキュメントコレクターを実行します(リポジトリのルートから)。 + +[ドキュメントについて学ぶ](./server/storage/documents/DOCUMENTS.md) + +[ベクトルキャッシュについて学ぶ](./server/storage/vector-cache/VECTOR_CACHE.md) + +## 貢献する方法 + +- issueを作成する +- `-`の形式のブランチ名でPRを作成する +- マージしましょう + +## テレメトリーとプライバシー + +Mintplex Labs Inc.によって開発されたAnythingLLMには、匿名の使用情報を収集するテレメトリー機能が含まれています。 + +
+AnythingLLMのテレメトリーとプライバシーについての詳細 + +### なぜ? + +この情報を使用して、AnythingLLMの使用方法を理解し、新機能とバグ修正の優先順位を決定し、AnythingLLMのパフォーマンスと安定性を向上させるのに役立てます。 + +### オプトアウト + +サーバーまたはdockerの.env設定で`DISABLE_TELEMETRY`を「true」に設定して、テレメトリーからオプトアウトします。アプリ内でも、サイドバー > `プライバシー`に移動してテレメトリーを無効にすることができます。 + +### 明示的に追跡するもの + +製品およびロードマップの意思決定に役立つ使用詳細のみを追跡します。具体的には: + +- インストールのタイプ(Dockerまたはデスクトップ) +- ドキュメントが追加または削除されたとき。ドキュメントについての情報はありません。イベントが発生したことのみを知ります。これにより、使用状況を把握できます。 +- 使用中のベクトルデータベースのタイプ。どのベクトルデータベースプロバイダーが最も使用されているかを知り、更新があったときに優先して変更を行います。 +- 使用中のLLMのタイプ。最も人気のある選択肢を知り、更新があったときに優先して変更を行います。 +- チャットが送信された。これは最も一般的な「イベント」であり、すべてのインストールでのこのプロジェクトの日常的な「アクティビティ」についてのアイデアを提供します。再び、イベントのみが送信され、チャット自体の性質や内容に関する情報はありません。 + +これらの主張を検証するには、`Telemetry.sendTelemetry`が呼び出されるすべての場所を見つけてください。また、これらのイベントは出力ログに書き込まれるため、送信された具体的なデータも確認できます。IPアドレスやその他の識別情報は収集されません。テレメトリープロバイダーは[PostHog](https://posthog.com/)です。 + +[ソースコード内のすべてのテレメトリーイベントを表示](https://github.com/search?q=repo%3AMintplex-Labs%2Fanything-llm%20.sendTelemetry\(&type=code) + +
+ +## 🔗 その他の製品 + +- **[VectorAdmin][vector-admin]**:ベクトルデータベースを管理するためのオールインワンGUIおよびツールスイート。 +- **[OpenAI Assistant Swarm][assistant-swarm]**:単一のエージェントから指揮できるOpenAIアシスタントの軍隊に、ライブラリ全体を変換します。 + +
+ +[![][back-to-top]](#readme-top) + +
+ +--- + +Copyright © 2024 [Mintplex Labs][profile-link]。
+このプロジェクトは[MIT](./LICENSE)ライセンスの下でライセンスされています。 + + + +[back-to-top]: https://img.shields.io/badge/-BACK_TO_TOP-222628?style=flat-square +[profile-link]: https://github.com/mintplex-labs +[vector-admin]: https://github.com/mintplex-labs/vector-admin +[assistant-swarm]: https://github.com/Mintplex-Labs/openai-assistant-swarm +[docker-btn]: ./images/deployBtns/docker.png +[docker-deploy]: ./docker/HOW_TO_USE_DOCKER.md +[aws-btn]: ./images/deployBtns/aws.png +[aws-deploy]: ./cloud-deployments/aws/cloudformation/DEPLOY.md +[gcp-btn]: https://deploy.cloud.run/button.svg +[gcp-deploy]: ./cloud-deployments/gcp/deployment/DEPLOY.md +[do-btn]: https://www.deploytodo.com/do-btn-blue.svg +[do-deploy]: ./cloud-deployments/digitalocean/terraform/DEPLOY.md +[render-btn]: https://render.com/images/deploy-to-render-button.svg +[render-deploy]: https://render.com/deploy?repo=https://github.com/Mintplex-Labs/anything-llm&branch=render +[render-btn]: https://render.com/images/deploy-to-render-button.svg +[render-deploy]: https://render.com/deploy?repo=https://github.com/Mintplex-Labs/anything-llm&branch=render +[railway-btn]: https://railway.app/button.svg +[railway-deploy]: https://railway.app/template/HNSCS1?referralCode=WFgJkn diff --git a/README.md b/README.md index 178473e1..3b975a36 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@

- English · 简体中文 + English · 简体中文 · 日本語

From 5578e567cecbd44d956b69bb2f6e4e31e24a23f0 Mon Sep 17 00:00:00 2001 From: timothycarambat Date: Thu, 6 Jun 2024 12:15:45 -0700 Subject: [PATCH 07/12] move translated READMEs into subfolder --- README.md | 2 +- README.ja-JP.md => locales/README.ja-JP.md | 2 +- README.zh-CN.md => locales/README.zh-CN.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) rename README.ja-JP.md => locales/README.ja-JP.md (99%) rename README.zh-CN.md => locales/README.zh-CN.md (99%) diff --git a/README.md b/README.md index 3b975a36..bc3e9fdd 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@

- English · 简体中文 · 日本語 + English · 简体中文 · 日本語

diff --git a/README.ja-JP.md b/locales/README.ja-JP.md similarity index 99% rename from README.ja-JP.md rename to locales/README.ja-JP.md index ae587373..39e442bb 100644 --- a/README.ja-JP.md +++ b/locales/README.ja-JP.md @@ -29,7 +29,7 @@

- English · 简体中文 · 日本語 + English · 简体中文 · 日本語

diff --git a/README.zh-CN.md b/locales/README.zh-CN.md similarity index 99% rename from README.zh-CN.md rename to locales/README.zh-CN.md index 2385a04c..13505220 100644 --- a/README.zh-CN.md +++ b/locales/README.zh-CN.md @@ -25,7 +25,7 @@

- English · 简体中文 + English · 简体中文 · 日本語

From d29292ebd297b1e2006a5d58943a0b2588efe442 Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Thu, 6 Jun 2024 12:43:34 -0700 Subject: [PATCH 08/12] [FEAT] Add LiteLLM embedding provider support (#1579) * add liteLLM embedding provider support * update tooltip id --------- Co-authored-by: timothycarambat --- docker/.env.example | 6 + .../LiteLLMOptions/index.jsx | 186 ++++++++++++++++++ .../EmbeddingPreference/index.jsx | 9 + .../Steps/DataHandling/index.jsx | 7 + server/.env.example | 6 + .../utils/EmbeddingEngines/liteLLM/index.js | 93 +++++++++ server/utils/helpers/index.js | 3 + server/utils/helpers/updateENV.js | 1 + 8 files changed, 311 insertions(+) create mode 100644 frontend/src/components/EmbeddingSelection/LiteLLMOptions/index.jsx create mode 100644 server/utils/EmbeddingEngines/liteLLM/index.js diff --git a/docker/.env.example b/docker/.env.example index 6368a190..174a9d69 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -128,6 +128,12 @@ GID='1000' # VOYAGEAI_API_KEY= # EMBEDDING_MODEL_PREF='voyage-large-2-instruct' +# EMBEDDING_ENGINE='litellm' +# EMBEDDING_MODEL_PREF='text-embedding-ada-002' +# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192 +# LITE_LLM_BASE_PATH='http://127.0.0.1:4000' +# LITE_LLM_API_KEY='sk-123abc' + ########################################### ######## Vector Database Selection ######## ########################################### diff --git a/frontend/src/components/EmbeddingSelection/LiteLLMOptions/index.jsx b/frontend/src/components/EmbeddingSelection/LiteLLMOptions/index.jsx new file mode 100644 index 00000000..d5586c88 --- /dev/null +++ b/frontend/src/components/EmbeddingSelection/LiteLLMOptions/index.jsx @@ -0,0 +1,186 @@ +import { useEffect, useState } from "react"; +import System from "@/models/system"; +import { Warning } from "@phosphor-icons/react"; +import { Tooltip } from "react-tooltip"; + +export default function LiteLLMOptions({ settings }) { + const [basePathValue, setBasePathValue] = useState(settings?.LiteLLMBasePath); + const [basePath, setBasePath] = useState(settings?.LiteLLMBasePath); + const [apiKeyValue, setApiKeyValue] = useState(settings?.LiteLLMAPIKey); + const [apiKey, setApiKey] = useState(settings?.LiteLLMAPIKey); + + return ( +

+
+
+ + setBasePathValue(e.target.value)} + onBlur={() => setBasePath(basePathValue)} + /> +
+ +
+ + e.target.blur()} + defaultValue={settings?.EmbeddingModelMaxChunkLength} + required={false} + autoComplete="off" + /> +
+
+
+
+
+ +
+ setApiKeyValue(e.target.value)} + onBlur={() => setApiKey(apiKeyValue)} + /> +
+
+
+ ); +} + +function LiteLLMModelSelection({ settings, basePath = null, apiKey = null }) { + const [customModels, setCustomModels] = useState([]); + const [loading, setLoading] = useState(true); + + useEffect(() => { + async function findCustomModels() { + if (!basePath) { + setCustomModels([]); + setLoading(false); + return; + } + setLoading(true); + const { models } = await System.customModels( + "litellm", + typeof apiKey === "boolean" ? null : apiKey, + basePath + ); + setCustomModels(models || []); + setLoading(false); + } + findCustomModels(); + }, [basePath, apiKey]); + + if (loading || customModels.length == 0) { + return ( +
+ + +
+ ); + } + + return ( +
+
+ + +
+ +
+ ); +} + +function EmbeddingModelTooltip() { + return ( +
+ + +

+ Be sure to select a valid embedding model. Chat models are not + embedding models. See{" "} + + this page + {" "} + for more information. +

+
+
+ ); +} diff --git a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx index 5a0f51c1..4d032dc0 100644 --- a/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx +++ b/frontend/src/pages/GeneralSettings/EmbeddingPreference/index.jsx @@ -11,6 +11,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import CohereLogo from "@/media/llmprovider/cohere.png"; import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png"; +import LiteLLMLogo from "@/media/llmprovider/litellm.png"; import PreLoader from "@/components/Preloader"; import ChangeWarningModal from "@/components/ChangeWarning"; @@ -22,6 +23,7 @@ import OllamaEmbeddingOptions from "@/components/EmbeddingSelection/OllamaOption import LMStudioEmbeddingOptions from "@/components/EmbeddingSelection/LMStudioOptions"; import CohereEmbeddingOptions from "@/components/EmbeddingSelection/CohereOptions"; import VoyageAiOptions from "@/components/EmbeddingSelection/VoyageAiOptions"; +import LiteLLMOptions from "@/components/EmbeddingSelection/LiteLLMOptions"; import EmbedderItem from "@/components/EmbeddingSelection/EmbedderItem"; import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react"; @@ -88,6 +90,13 @@ const EMBEDDERS = [ options: (settings) => , description: "Run powerful embedding models from Voyage AI.", }, + { + name: "LiteLLM", + value: "litellm", + logo: LiteLLMLogo, + options: (settings) => , + description: "Run powerful embedding models from LiteLLM.", + }, ]; export default function GeneralEmbeddingPreference() { diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx index 35358636..b4fa666f 100644 --- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx +++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx @@ -301,6 +301,13 @@ export const EMBEDDING_ENGINE_PRIVACY = { ], logo: VoyageAiLogo, }, + litellm: { + name: "LiteLLM", + description: [ + "Your document text is only accessible on the server running LiteLLM and to the providers you configured in LiteLLM.", + ], + logo: LiteLLMLogo, + }, }; export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) { diff --git a/server/.env.example b/server/.env.example index f51d6177..6148d594 100644 --- a/server/.env.example +++ b/server/.env.example @@ -125,6 +125,12 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea # VOYAGEAI_API_KEY= # EMBEDDING_MODEL_PREF='voyage-large-2-instruct' +# EMBEDDING_ENGINE='litellm' +# EMBEDDING_MODEL_PREF='text-embedding-ada-002' +# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192 +# LITE_LLM_BASE_PATH='http://127.0.0.1:4000' +# LITE_LLM_API_KEY='sk-123abc' + ########################################### ######## Vector Database Selection ######## ########################################### diff --git a/server/utils/EmbeddingEngines/liteLLM/index.js b/server/utils/EmbeddingEngines/liteLLM/index.js new file mode 100644 index 00000000..cd22480b --- /dev/null +++ b/server/utils/EmbeddingEngines/liteLLM/index.js @@ -0,0 +1,93 @@ +const { toChunks, maximumChunkLength } = require("../../helpers"); + +class LiteLLMEmbedder { + constructor() { + const { OpenAI: OpenAIApi } = require("openai"); + if (!process.env.LITE_LLM_BASE_PATH) + throw new Error( + "LiteLLM must have a valid base path to use for the api." 
+ ); + this.basePath = process.env.LITE_LLM_BASE_PATH; + this.openai = new OpenAIApi({ + baseURL: this.basePath, + apiKey: process.env.LITE_LLM_API_KEY ?? null, + }); + this.model = process.env.EMBEDDING_MODEL_PREF || "text-embedding-ada-002"; + + // Limit of how many strings we can process in a single pass to stay with resource or network limits + this.maxConcurrentChunks = 500; + this.embeddingMaxChunkLength = maximumChunkLength(); + } + + async embedTextInput(textInput) { + const result = await this.embedChunks( + Array.isArray(textInput) ? textInput : [textInput] + ); + return result?.[0] || []; + } + + async embedChunks(textChunks = []) { + // Because there is a hard POST limit on how many chunks can be sent at once to LiteLLM (~8mb) + // we concurrently execute each max batch of text chunks possible. + // Refer to constructor maxConcurrentChunks for more info. + const embeddingRequests = []; + for (const chunk of toChunks(textChunks, this.maxConcurrentChunks)) { + embeddingRequests.push( + new Promise((resolve) => { + this.openai.embeddings + .create({ + model: this.model, + input: chunk, + }) + .then((result) => { + resolve({ data: result?.data, error: null }); + }) + .catch((e) => { + e.type = + e?.response?.data?.error?.code || + e?.response?.status || + "failed_to_embed"; + e.message = e?.response?.data?.error?.message || e.message; + resolve({ data: [], error: e }); + }); + }) + ); + } + + const { data = [], error = null } = await Promise.all( + embeddingRequests + ).then((results) => { + // If any errors were returned from LiteLLM abort the entire sequence because the embeddings + // will be incomplete. + const errors = results + .filter((res) => !!res.error) + .map((res) => res.error) + .flat(); + if (errors.length > 0) { + let uniqueErrors = new Set(); + errors.map((error) => + uniqueErrors.add(`[${error.type}]: ${error.message}`) + ); + + return { + data: [], + error: Array.from(uniqueErrors).join(", "), + }; + } + return { + data: results.map((res) => res?.data || []).flat(), + error: null, + }; + }); + + if (!!error) throw new Error(`LiteLLM Failed to embed: ${error}`); + return data.length > 0 && + data.every((embd) => embd.hasOwnProperty("embedding")) + ? data.map((embd) => embd.embedding) + : null; + } +} + +module.exports = { + LiteLLMEmbedder, +}; diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js index e60202a6..8f0df126 100644 --- a/server/utils/helpers/index.js +++ b/server/utils/helpers/index.js @@ -128,6 +128,9 @@ function getEmbeddingEngineSelection() { case "voyageai": const { VoyageAiEmbedder } = require("../EmbeddingEngines/voyageAi"); return new VoyageAiEmbedder(); + case "litellm": + const { LiteLLMEmbedder } = require("../EmbeddingEngines/liteLLM"); + return new LiteLLMEmbedder(); default: return new NativeEmbedder(); } diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index d5cdc68f..1a0e710a 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -577,6 +577,7 @@ function supportedEmbeddingModel(input = "") { "lmstudio", "cohere", "voyageai", + "litellm", ]; return supported.includes(input) ? null From 98cef508a6c879d290f1a29aaafb13e873e6b557 Mon Sep 17 00:00:00 2001 From: Timothy Carambat Date: Fri, 7 Jun 2024 03:50:42 +0800 Subject: [PATCH 09/12] Feature/devcontv2 (#1622) * Updated apt-packages source for devcontainer Switched the devcontainer's package source to a different repository to align with updated dependencies and package availability. 
The previous source from 'rocker-org' is replaced with 'devcontainers-contrib', which may offer more recent or relevant development tools. * Subject: Centralize prettier ignores and refine config Body: Centralized all prettier ignore rules by removing individual `.prettierignore` files in subprojects and updating the root `.prettierignore` to include previously ignored patterns, ensuring consistency across the workspace. Additionally, the prettier configuration was refined by making the file pattern for `.config.js` files consistent and adjusting quote styles for better readability. All lint scripts across the project were updated to respect the centralized ignore path, enhancing maintainability. The consolidation simplifies the process of managing ignore rules as the project scales, ensuring developers can focus on writing code without worrying about divergent formatting standards. These changes also align with introducing comprehensive linting across multiple environments to keep the codebase clean and consistent. This adjustment is a foundational step towards a more streamlined and unified code base, making it easier for new contributors to adhere to established coding standards and reducing the cognitive load associated with managing multiple configuration files across the project. * unset package json changes --------- Co-authored-by: Francisco Bischoff Co-authored-by: Francisco Bischoff <984592+franzbischoff@users.noreply.github.com> --- .devcontainer/devcontainer.json | 2 +- .prettierignore | 4 ++++ .prettierrc | 2 +- collector/package.json | 2 +- embed/.prettierignore | 9 --------- embed/jsconfig.json | 10 ++++------ embed/package.json | 3 ++- embed/vite.config.js | 6 +++--- frontend/jsconfig.json | 6 ++---- frontend/package.json | 2 +- frontend/vite.config.js | 2 +- server/package.json | 2 +- 12 files changed, 21 insertions(+), 29 deletions(-) delete mode 100644 embed/.prettierignore diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 83792da7..58c42b62 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -22,7 +22,7 @@ // Terraform support "ghcr.io/devcontainers/features/terraform:1": {}, // Just a wrap to install needed packages - "ghcr.io/rocker-org/devcontainer-features/apt-packages:1": { + "ghcr.io/devcontainers-contrib/features/apt-packages:1": { // Dependencies copied from ../docker/Dockerfile plus some dev stuff "packages": [ "build-essential", diff --git a/.prettierignore b/.prettierignore index faedf325..e3b0c14e 100644 --- a/.prettierignore +++ b/.prettierignore @@ -10,3 +10,7 @@ frontend/bundleinspector.html #server server/swagger/openapi.json + +#embed +**/static/** +embed/src/utils/chat/hljs.js diff --git a/.prettierrc b/.prettierrc index 3574c1df..5e2bccfe 100644 --- a/.prettierrc +++ b/.prettierrc @@ -17,7 +17,7 @@ } }, { - "files": "*.config.js", + "files": ["*.config.js"], "options": { "semi": false, "parser": "flow", diff --git a/collector/package.json b/collector/package.json index 785604e3..938d65e1 100644 --- a/collector/package.json +++ b/collector/package.json @@ -12,7 +12,7 @@ "scripts": { "dev": "NODE_ENV=development nodemon --ignore hotdir --ignore storage --trace-warnings index.js", "start": "NODE_ENV=production node index.js", - "lint": "yarn prettier --write ./processSingleFile ./processLink ./utils index.js" + "lint": "yarn prettier --ignore-path ../.prettierignore --write ./processSingleFile ./processLink ./utils index.js" }, "dependencies": { "@googleapis/youtube": "^9.0.0", diff 
--git a/embed/.prettierignore b/embed/.prettierignore deleted file mode 100644 index d90a3c08..00000000 --- a/embed/.prettierignore +++ /dev/null @@ -1,9 +0,0 @@ -# defaults -**/.git -**/.svn -**/.hg -**/node_modules - -**/dist -**/static/** -src/utils/chat/hljs.js diff --git a/embed/jsconfig.json b/embed/jsconfig.json index c8cc81fd..20cd368c 100644 --- a/embed/jsconfig.json +++ b/embed/jsconfig.json @@ -4,9 +4,7 @@ "target": "esnext", "jsx": "react", "paths": { - "@/*": [ - "./src/*" - ], - } - } -} \ No newline at end of file + "@/*": ["./src/*"], + }, + }, +} diff --git a/embed/package.json b/embed/package.json index eb399930..712af8e6 100644 --- a/embed/package.json +++ b/embed/package.json @@ -1,6 +1,7 @@ { "name": "anythingllm-embedded-chat", "private": false, + "license": "MIT", "type": "module", "scripts": { "dev": "nodemon -e js,jsx,css --watch src --exec \"yarn run dev:preview\"", @@ -8,7 +9,7 @@ "dev:build": "vite build && cat src/static/tailwind@3.4.1.js >> dist/anythingllm-chat-widget.js", "build": "vite build && cat src/static/tailwind@3.4.1.js >> dist/anythingllm-chat-widget.js && npx terser --compress -o dist/anythingllm-chat-widget.min.js -- dist/anythingllm-chat-widget.js", "build:publish": "yarn build && mkdir -p ../frontend/public/embed && cp -r dist/anythingllm-chat-widget.min.js ../frontend/public/embed/anythingllm-chat-widget.min.js", - "lint": "yarn prettier --write ./src" + "lint": "yarn prettier --ignore-path ../.prettierignore --write ./src" }, "dependencies": { "@microsoft/fetch-event-source": "^2.0.1", diff --git a/embed/vite.config.js b/embed/vite.config.js index 21506422..9e23c70d 100644 --- a/embed/vite.config.js +++ b/embed/vite.config.js @@ -38,7 +38,7 @@ export default defineConfig({ rollupOptions: { external: [ // Reduces transformation time by 50% and we don't even use this variant, so we can ignore. - /@phosphor-icons\/react\/dist\/ssr/, + /@phosphor-icons\/react\/dist\/ssr/ ] }, commonjsOptions: { @@ -51,7 +51,7 @@ export default defineConfig({ emptyOutDir: true, inlineDynamicImports: true, assetsDir: "", - sourcemap: 'inline', + sourcemap: "inline" }, optimizeDeps: { esbuildOptions: { @@ -60,5 +60,5 @@ export default defineConfig({ }, plugins: [] } - }, + } }) diff --git a/frontend/jsconfig.json b/frontend/jsconfig.json index c8cc81fd..e21fc376 100644 --- a/frontend/jsconfig.json +++ b/frontend/jsconfig.json @@ -4,9 +4,7 @@ "target": "esnext", "jsx": "react", "paths": { - "@/*": [ - "./src/*" - ], + "@/*": ["./src/*"] } } -} \ No newline at end of file +} diff --git a/frontend/package.json b/frontend/package.json index 11e612fc..2b669731 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -7,7 +7,7 @@ "start": "vite --open", "dev": "NODE_ENV=development vite --debug --host=0.0.0.0", "build": "vite build", - "lint": "yarn prettier --write ./src", + "lint": "yarn prettier --ignore-path ../.prettierignore --write ./src", "preview": "vite preview" }, "dependencies": { diff --git a/frontend/vite.config.js b/frontend/vite.config.js index 3785b947..ff96bdcd 100644 --- a/frontend/vite.config.js +++ b/frontend/vite.config.js @@ -51,7 +51,7 @@ export default defineConfig({ rollupOptions: { external: [ // Reduces transformation time by 50% and we don't even use this variant, so we can ignore. 
- /@phosphor-icons\/react\/dist\/ssr/, + /@phosphor-icons\/react\/dist\/ssr/ ] }, commonjsOptions: { diff --git a/server/package.json b/server/package.json index 4f995470..b107695c 100644 --- a/server/package.json +++ b/server/package.json @@ -12,7 +12,7 @@ "scripts": { "dev": "NODE_ENV=development nodemon --ignore documents --ignore vector-cache --ignore storage --ignore swagger --trace-warnings index.js", "start": "NODE_ENV=production node index.js", - "lint": "yarn prettier --write ./endpoints ./models ./utils index.js", + "lint": "yarn prettier --ignore-path ../.prettierignore --write ./endpoints ./models ./utils index.js", "swagger": "node ./swagger/init.js", "sqlite:migrate": "cd ./utils/prisma && node migrateFromSqlite.js" }, From 26c220503cbf22ec2b55fa588e1f795914beb89a Mon Sep 17 00:00:00 2001 From: Sean Hatfield Date: Thu, 6 Jun 2024 12:56:11 -0700 Subject: [PATCH 10/12] [FEAT] Edit message button (#1392) * WIP edit message feature * WIP edit message * WIP editing messages feature * Fix PFPs TODO: Fix default user profile image Add User and Assistant workspace response * unset PFP changes for later PR --------- Co-authored-by: timothycarambat --- frontend/package.json | 2 +- frontend/src/components/ChatBubble/index.jsx | 5 +- frontend/src/components/DefaultChat/index.jsx | 23 ++-- frontend/src/components/UserIcon/index.jsx | 2 +- .../src/components/UserIcon/workspace.png | Bin 0 -> 1486 bytes .../Actions/EditMessage/index.jsx | 126 ++++++++++++++++++ .../HistoricalMessage/Actions/index.jsx | 13 +- .../ChatHistory/HistoricalMessage/index.jsx | 88 ++++++++---- .../ChatHistory/PromptReply/index.jsx | 4 +- .../ChatContainer/ChatHistory/index.jsx | 45 +++++++ .../WorkspaceChat/ChatContainer/index.jsx | 1 + .../src/components/WorkspaceChat/index.jsx | 1 + frontend/src/models/workspace.js | 53 +++++++- frontend/src/models/workspaceThread.js | 45 +++++++ frontend/src/utils/chat/index.js | 9 +- server/endpoints/workspaceThreads.js | 78 ++++++++++- server/endpoints/workspaces.js | 62 ++++++++- server/models/workspaceChats.js | 18 +++ server/utils/helpers/chat/responses.js | 1 + 19 files changed, 513 insertions(+), 63 deletions(-) create mode 100644 frontend/src/components/UserIcon/workspace.png create mode 100644 frontend/src/components/WorkspaceChat/ChatContainer/ChatHistory/HistoricalMessage/Actions/EditMessage/index.jsx diff --git a/frontend/package.json b/frontend/package.json index 2b669731..8aa4dcfa 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -63,4 +63,4 @@ "tailwindcss": "^3.3.1", "vite": "^4.3.0" } -} +} \ No newline at end of file diff --git a/frontend/src/components/ChatBubble/index.jsx b/frontend/src/components/ChatBubble/index.jsx index 8d311883..c5a1f190 100644 --- a/frontend/src/components/ChatBubble/index.jsx +++ b/frontend/src/components/ChatBubble/index.jsx @@ -1,5 +1,5 @@ import React from "react"; -import Jazzicon from "../UserIcon"; +import UserIcon from "../UserIcon"; import { userFromStorage } from "@/utils/request"; import { AI_BACKGROUND_COLOR, USER_BACKGROUND_COLOR } from "@/utils/constants"; @@ -11,8 +11,7 @@ export default function ChatBubble({ message, type, popMsg }) {
- diff --git a/frontend/src/components/DefaultChat/index.jsx b/frontend/src/components/DefaultChat/index.jsx index 43ae6e7a..ae52a0d2 100644 --- a/frontend/src/components/DefaultChat/index.jsx +++ b/frontend/src/components/DefaultChat/index.jsx @@ -13,7 +13,7 @@ import { isMobile } from "react-device-detect"; import { SidebarMobileHeader } from "../Sidebar"; import ChatBubble from "../ChatBubble"; import System from "@/models/system"; -import Jazzicon from "../UserIcon"; +import UserIcon from "../UserIcon"; import { userFromStorage } from "@/utils/request"; import { AI_BACKGROUND_COLOR, USER_BACKGROUND_COLOR } from "@/utils/constants"; import useUser from "@/hooks/useUser"; @@ -46,7 +46,7 @@ export default function DefaultChatContainer() { className={`pt-10 pb-6 px-4 w-full flex gap-x-5 md:max-w-[80%] flex-col`} >
- +
- +
- +
- @@ -151,7 +150,7 @@ export default function DefaultChatContainer() { className={`py-6 px-4 w-full flex gap-x-5 md:max-w-[80%] flex-col`} >
- +
- @@ -213,7 +211,7 @@ export default function DefaultChatContainer() { className={`py-6 px-4 w-full flex gap-x-5 md:max-w-[80%] flex-col`} >
- +
- @@ -275,7 +272,7 @@ export default function DefaultChatContainer() { className={`py-6 px-4 w-full flex gap-x-5 md:max-w-[80%] flex-col`} >
- +
K~#7F?N)tE zl~ou&@8#YLmwUN_@*#LZmsoj`Vv#eGH3*rTTXSV)*-A@Gt)LfMUS9sU1C?Y< zZEbD7+wHl6b~Ax?Cr+GrNDV5AEKoEyo9(^;rOeOs3IaT~r@^y7hK3qIZzX0Di2)(B z)15qdGS}z#yFp)8bOXG)M$m3#)cynL-35pyAxbD*O=+*x)^AzAew__FEltf$g21~t zO_Y?Fl$f+;O?DoRf8W`8yr>}meQ4SN^4|UXGY;(A->B2+EgrYa(b3-4uGQ-)2N|@a zXRKJ6x$1tuf6|_@Y}sRiAp8b4UNYYr&CSi#Rh~cp__))}5HPeIumO+*c(|##^@82$ zVt?AlebCPW;C94czd2YlZBwvuW0_W~EyCphYtNoL_YRKV93B~ie!kgkUK}bha~4}y zR~HcknWKV8s371FE_4HGp%1>lm%sJqyW`{IKYe@on+oVlfSAsouN{mr#1^6crHdEO zT&=5X6GgEb=6cX}N%B%B&QXCPktmPH6VkVqgb^VS;BXO46GY4#jYdBZ@k2LIUsKcl zK=zt#JkO(pN!!IsTbCpz??wH_hWe_~oll*H@d3aD)Elj+sE}lilawGuiNSgV2y+V} z1=~j`lrA645(?93G%`^R7=!gW9}o&aU-!nLBi5|!?Dvq6%i$Q@x&6sgNXTNbObiVT z@jyzc8A*^B6@e5Lkr0cEiz8mIcmAYs`y}9lk)dEzPN!3e3<#kU+%j(W?%ioyw-wt& zSvLFJo{86AEjy~$>-#CCV@OCnk!V9iNhHBcDS^&;$ii(OqreIyQ_sfi6T&4VB*d0H zz3XF{5eqnDue`A5aBFj87tD>q9mi*7WpQc%1g;Gd2v80)FNhMBL&z+l%vtF9`T0bm zl2YU$B11dF=eDm7>Z1)sJQYY?P4(H1wpJuGcIHCW=IuLoSt~0m4aLR9ijR?z5h4)S zKFAa=DBCxe>$yom6dWZ|Fx>!%jDsVr0kUWled?^O#2mj^DeTskrmB4}?)d^74Z?V# zC4I%|2lMjI0YQWM!asEyQo^1-nNqc;7}-)%k{xX{MkSb+Osy*@ zNVF_pejMgFm&-9&_S~~r*!}RdIo)2D7q*ClOZMJpo`31>r5PE=Qqt1g{>e!l)YnQu zEp%KIf@Gntg>W~z?zQqGMH>nWFV9eCL7QNCO-=e79~?e(puM@V3A%%snVEcFUthwp zPd+PKwc7gf?XH4$unPlEY~Hv7n!f-%WLCX~dl0#nhVQ%^9xY5a7)@C+RI?<@oFq%| zWdv@6sZ$?)c(k#ux&;XVd=7_0f@?th+V*8n&f0Zx=JSA)n1Eo)-K$rxN=!~ok!3k*+HP!Q*xBFPd&6Wh4f}jPXDCwS4y2+~VkriLAw4E0 zW;wJG=|~o^2;WFDBRKWf1 !prev); + } + + useEffect(() => { + function listenForEdits() { + if (!chatId || !role) return; + window.addEventListener(EDIT_EVENT, onEditEvent); + } + listenForEdits(); + return () => { + window.removeEventListener(EDIT_EVENT, onEditEvent); + }; + }, [chatId, role]); + + return { isEditing, setIsEditing }; +} + +export function EditMessageAction({ chatId = null, role, isEditing }) { + function handleEditClick() { + window.dispatchEvent( + new CustomEvent(EDIT_EVENT, { detail: { chatId, role } }) + ); + } + + if (!chatId || isEditing) return null; + return ( +
+ + +
+ ); +} + +export function EditMessageForm({ + role, + chatId, + message, + adjustTextArea, + saveChanges, +}) { + const formRef = useRef(null); + function handleSaveMessage(e) { + e.preventDefault(); + const form = new FormData(e.target); + const editedMessage = form.get("editedMessage"); + saveChanges({ editedMessage, chatId, role }); + window.dispatchEvent( + new CustomEvent(EDIT_EVENT, { detail: { chatId, role } }) + ); + } + + function cancelEdits() { + window.dispatchEvent( + new CustomEvent(EDIT_EVENT, { detail: { chatId, role } }) + ); + return false; + } + + useEffect(() => { + if (!formRef || !formRef.current) return; + formRef.current.focus(); + adjustTextArea({ target: formRef.current }); + }, [formRef]); + + return ( +
+