mirror of
https://github.com/Mintplex-Labs/anything-llm.git
synced 2024-11-05 06:20:10 +01:00
Prevent external service localhost question (#497)
* Prevent external service localhost question * add 0.0.0.0 to docker-invalid URL * clarify hint
This commit is contained in:
parent
e0a0a8976d
commit
d7481671ba
1
.vscode/settings.json
vendored
1
.vscode/settings.json
vendored
@ -1,5 +1,6 @@
|
||||
{
|
||||
"cSpell.words": [
|
||||
"Dockerized",
|
||||
"Ollama",
|
||||
"openai",
|
||||
"Qdrant",
|
||||
|
@ -83,6 +83,7 @@ RUN cd ./server && npx prisma migrate deploy --schema=./prisma/schema.prisma
|
||||
|
||||
# Setup the environment
|
||||
ENV NODE_ENV=production
|
||||
ENV ANYTHING_LLM_RUNTIME=docker
|
||||
|
||||
# Expose the server port
|
||||
EXPOSE 3001
|
||||
|
@ -56,7 +56,7 @@ const KEY_MAPPING = {
|
||||
// LMStudio Settings
|
||||
LMStudioBasePath: {
|
||||
envKey: "LMSTUDIO_BASE_PATH",
|
||||
checks: [isNotEmpty, validLLMExternalBasePath],
|
||||
checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
|
||||
},
|
||||
LMStudioTokenLimit: {
|
||||
envKey: "LMSTUDIO_MODEL_TOKEN_LIMIT",
|
||||
@ -66,7 +66,7 @@ const KEY_MAPPING = {
|
||||
// LocalAI Settings
|
||||
LocalAiBasePath: {
|
||||
envKey: "LOCAL_AI_BASE_PATH",
|
||||
checks: [isNotEmpty, validLLMExternalBasePath],
|
||||
checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
|
||||
},
|
||||
LocalAiModelPref: {
|
||||
envKey: "LOCAL_AI_MODEL_PREF",
|
||||
@ -83,7 +83,7 @@ const KEY_MAPPING = {
|
||||
|
||||
OllamaLLMBasePath: {
|
||||
envKey: "OLLAMA_BASE_PATH",
|
||||
checks: [isNotEmpty, validOllamaLLMBasePath],
|
||||
checks: [isNotEmpty, validOllamaLLMBasePath, validDockerizedUrl],
|
||||
},
|
||||
OllamaLLMModelPref: {
|
||||
envKey: "OLLAMA_MODEL_PREF",
|
||||
@ -106,7 +106,7 @@ const KEY_MAPPING = {
|
||||
},
|
||||
EmbeddingBasePath: {
|
||||
envKey: "EMBEDDING_BASE_PATH",
|
||||
checks: [isNotEmpty, validLLMExternalBasePath],
|
||||
checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
|
||||
},
|
||||
EmbeddingModelPref: {
|
||||
envKey: "EMBEDDING_MODEL_PREF",
|
||||
@ -126,7 +126,7 @@ const KEY_MAPPING = {
|
||||
// Chroma Options
|
||||
ChromaEndpoint: {
|
||||
envKey: "CHROMA_ENDPOINT",
|
||||
checks: [isValidURL, validChromaURL],
|
||||
checks: [isValidURL, validChromaURL, validDockerizedUrl],
|
||||
},
|
||||
ChromaApiHeader: {
|
||||
envKey: "CHROMA_API_HEADER",
|
||||
@ -140,7 +140,7 @@ const KEY_MAPPING = {
|
||||
// Weaviate Options
|
||||
WeaviateEndpoint: {
|
||||
envKey: "WEAVIATE_ENDPOINT",
|
||||
checks: [isValidURL],
|
||||
checks: [isValidURL, validDockerizedUrl],
|
||||
},
|
||||
WeaviateApiKey: {
|
||||
envKey: "WEAVIATE_API_KEY",
|
||||
@ -150,7 +150,7 @@ const KEY_MAPPING = {
|
||||
// QDrant Options
|
||||
QdrantEndpoint: {
|
||||
envKey: "QDRANT_ENDPOINT",
|
||||
checks: [isValidURL],
|
||||
checks: [isValidURL, validDockerizedUrl],
|
||||
},
|
||||
QdrantApiKey: {
|
||||
envKey: "QDRANT_API_KEY",
|
||||
@ -318,6 +318,17 @@ function isDownloadedModel(input = "") {
|
||||
return files.includes(input);
|
||||
}
|
||||
|
||||
// Guard against service URLs that point back at the container itself.
// When AnythingLLM runs inside Docker, loopback/unspecified hostnames
// (localhost, 127.0.0.1, 0.0.0.0) resolve to the container — not the host
// machine where the external service (Ollama, LMStudio, Chroma, etc.) lives.
// Returns a human-readable error string for such URLs, or null when the URL
// is acceptable, the runtime is not Docker, or the input cannot be parsed
// (malformed URLs are reported by the sibling isValidURL-style checks).
function validDockerizedUrl(input = "") {
  if (process.env.ANYTHING_LLM_RUNTIME !== "docker") return null;

  const selfReferencingHosts = ["localhost", "127.0.0.1", "0.0.0.0"];
  try {
    const parsed = new URL(input);
    if (!selfReferencingHosts.includes(parsed.hostname.toLowerCase())) {
      return null;
    }
    return "Localhost, 127.0.0.1, or 0.0.0.0 origins cannot be reached from inside the AnythingLLM container. Please use host.docker.internal, a real machine ip, or domain to connect to your service.";
  } catch {
    // Unparseable input is not this validator's concern — stay silent here.
    return null;
  }
}
|
||||
|
||||
// This will force update .env variables which for any which reason were not able to be parsed or
|
||||
// read from an ENV file as this seems to be a complicating step for many so allowing people to write
|
||||
// to the process will at least alleviate that issue. It does not perform comprehensive validity checks or sanity checks
|
||||
|
Loading…
Reference in New Issue
Block a user