SERVER_PORT=3001
STORAGE_DIR="/app/server/storage"
UID='1000'
GID='1000'
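# Note: on Linux, a host user's UID and GID can be checked with `id -u` and `id -g`.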
# JWT_SECRET="my-random-string-for-seeding" # Only needed if AUTH_TOKEN is set. Please generate a random string at least 12 characters long.
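# Example (one option, assuming the openssl CLI is available): openssl rand -base64 24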
###########################################
######## LLM API SELECTION ################
###########################################
# LLM_PROVIDER='openai'
# OPEN_AI_KEY=
# OPEN_MODEL_PREF='gpt-3.5-turbo'
# LLM_PROVIDER='gemini'
# GEMINI_API_KEY=
# GEMINI_LLM_MODEL_PREF='gemini-pro'
# LLM_PROVIDER='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
# EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
# LLM_PROVIDER='anthropic'
# ANTHROPIC_API_KEY=sk-ant-xxxx
# ANTHROPIC_MODEL_PREF='claude-2'
# LLM_PROVIDER='lmstudio'
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='localai'
# LOCAL_AI_BASE_PATH='http://host.docker.internal:8080/v1'
# LOCAL_AI_MODEL_PREF='luna-ai-llama2'
# LOCAL_AI_MODEL_TOKEN_LIMIT=4096
# LOCAL_AI_API_KEY="sk-123abc"
# LLM_PROVIDER='ollama'
# OLLAMA_BASE_PATH='http://host.docker.internal:11434'
# OLLAMA_MODEL_PREF='llama2'
# OLLAMA_MODEL_TOKEN_LIMIT=4096
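# Note: the model set above must already be available to your Ollama instance; e.g. (assuming the ollama CLI is installed): ollama pull llama2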
# LLM_PROVIDER='togetherai'
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
# LLM_PROVIDER='mistral'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'
# LLM_PROVIDER='perplexity'
# PERPLEXITY_API_KEY='my-perplexity-key'
# PERPLEXITY_MODEL_PREF='codellama-34b-instruct'
# LLM_PROVIDER='openrouter'
# OPENROUTER_API_KEY='my-openrouter-key'
# OPENROUTER_MODEL_PREF='openrouter/auto'
# LLM_PROVIDER='huggingface'
# HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
# HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
# HUGGING_FACE_LLM_TOKEN_LIMIT=8000
###########################################
######## Embedding API SELECTION ##########
###########################################
# Only used if you are using an LLM that does not natively support embedding (i.e., not OpenAI or Azure).
# EMBEDDING_ENGINE='openai'
# OPEN_AI_KEY=sk-xxxx
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_ENGINE='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002

# EMBEDDING_ENGINE='localai'
# EMBEDDING_BASE_PATH='http://localhost:8080/v1'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The maximum size, in characters, of a chunk of text to embed
###########################################
######## Vector Database Selection ########
###########################################
# Enable all below if you are using vector database: Chroma.
# VECTOR_DB="chroma"
# CHROMA_ENDPOINT='http://host.docker.internal:8000'
# CHROMA_API_HEADER="X-Api-Key"
# CHROMA_API_KEY="sk-123abc"
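# Example (assuming Docker is available and the official image with its default port is used): docker run -p 8000:8000 chromadb/chroma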
# Enable all below if you are using vector database: Pinecone.
# VECTOR_DB="pinecone"
# PINECONE_API_KEY=
# PINECONE_INDEX=
# Enable all below if you are using vector database: LanceDB.
# VECTOR_DB="lancedb"
# Enable all below if you are using vector database: Weaviate.
# VECTOR_DB="weaviate"
# WEAVIATE_ENDPOINT="http://localhost:8080"
# WEAVIATE_API_KEY=
# Enable all below if you are using vector database: Qdrant.
# VECTOR_DB="qdrant"
# QDRANT_ENDPOINT="http://localhost:6333"
# QDRANT_API_KEY=
# Enable all below if you are using vector database: Milvus.
# VECTOR_DB="milvus"
# MILVUS_ADDRESS="http://localhost:19530"
# MILVUS_USERNAME=
# MILVUS_PASSWORD=
# Enable all below if you are using vector database: Zilliz Cloud.
# VECTOR_DB="zilliz"
# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
# ZILLIZ_API_TOKEN=api-token-here
# Enable all below if you are using vector database: Astra DB.
# VECTOR_DB="astra"
# ASTRA_DB_APPLICATION_TOKEN=
# ASTRA_DB_ENDPOINT=
# CLOUD DEPLOYMENT VARIABLES ONLY
# AUTH_TOKEN="hunter2" # This is the password for your application when it is remotely hosted.
# DISABLE_TELEMETRY="false"
###########################################
######## PASSWORD COMPLEXITY ##############
###########################################
# Enforce a password schema for your organization's users.
# Documentation on how to use: https://github.com/kamronbatman/joi-password-complexity
# The default is only an 8-character minimum.
# PASSWORDMINCHAR=8
# PASSWORDMAXCHAR=250
# PASSWORDLOWERCASE=1
# PASSWORDUPPERCASE=1
# PASSWORDNUMERIC=1
# PASSWORDSYMBOL=1
# PASSWORDREQUIREMENTS=4
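# Note (per the joi-password-complexity docs linked above): PASSWORDREQUIREMENTS appears to set how many of the lowercase/uppercase/numeric/symbol rules must be satisfied.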
###########################################
######## ENABLE HTTPS SERVER ##############
###########################################
# By enabling this and providing the path/filename for the key and cert,
# the server will use HTTPS instead of HTTP.
# ENABLE_HTTPS="true"
# HTTPS_CERT_PATH="sslcert/cert.pem"
# HTTPS_KEY_PATH="sslcert/key.pem"
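# Example (assuming openssl is available; self-signed cert for local testing only):
#   openssl req -x509 -newkey rsa:4096 -nodes -days 365 -subj "/CN=localhost" -keyout sslcert/key.pem -out sslcert/cert.pem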