SERVER_PORT=3001
STORAGE_DIR="/app/server/storage"
UID='1000'
GID='1000'
# JWT_SECRET="my-random-string-for-seeding" # Only needed if AUTH_TOKEN is set. Please generate random string at least 12 chars long.

###########################################
######## LLM API SELECTION ################
###########################################
# LLM_PROVIDER='openai'
# OPEN_AI_KEY=
# OPEN_MODEL_PREF='gpt-4o'

# LLM_PROVIDER='gemini'
# GEMINI_API_KEY=
# GEMINI_LLM_MODEL_PREF='gemini-pro'

# LLM_PROVIDER='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
# EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002

# LLM_PROVIDER='anthropic'
# ANTHROPIC_API_KEY=sk-ant-xxxx
# ANTHROPIC_MODEL_PREF='claude-2'

# LLM_PROVIDER='lmstudio'
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
# LMSTUDIO_MODEL_PREF='Loaded from Chat UI' # this is a bug in LMStudio 0.2.17
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096

# LLM_PROVIDER='localai'
# LOCAL_AI_BASE_PATH='http://host.docker.internal:8080/v1'
# LOCAL_AI_MODEL_PREF='luna-ai-llama2'
# LOCAL_AI_MODEL_TOKEN_LIMIT=4096
# LOCAL_AI_API_KEY="sk-123abc"

# LLM_PROVIDER='ollama'
# OLLAMA_BASE_PATH='http://host.docker.internal:11434'
# OLLAMA_MODEL_PREF='llama2'
# OLLAMA_MODEL_TOKEN_LIMIT=4096

# LLM_PROVIDER='togetherai'
# TOGETHER_AI_API_KEY='my-together-ai-key'
# TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'

# LLM_PROVIDER='mistral'
# MISTRAL_API_KEY='example-mistral-ai-api-key'
# MISTRAL_MODEL_PREF='mistral-tiny'

# LLM_PROVIDER='perplexity'
# PERPLEXITY_API_KEY='my-perplexity-key'
# PERPLEXITY_MODEL_PREF='codellama-34b-instruct'

# LLM_PROVIDER='openrouter'
# OPENROUTER_API_KEY='my-openrouter-key'
# OPENROUTER_MODEL_PREF='openrouter/auto'

# LLM_PROVIDER='huggingface'
# HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
# HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
# HUGGING_FACE_LLM_TOKEN_LIMIT=8000

# LLM_PROVIDER='groq'
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama3-8b-8192

# LLM_PROVIDER='koboldcpp'
# KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
# KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
# KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096

# LLM_PROVIDER='textgenwebui'
# TEXT_GEN_WEB_UI_BASE_PATH='http://127.0.0.1:5000/v1'
# TEXT_GEN_WEB_UI_TOKEN_LIMIT=4096
# TEXT_GEN_WEB_UI_API_KEY='sk-123abc'

# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
# GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
# GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
# GENERIC_OPEN_AI_API_KEY=sk-123abc

# LLM_PROVIDER='litellm'
# LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
# LITE_LLM_MODEL_TOKEN_LIMIT=4096
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'

# LLM_PROVIDER='cohere'
# COHERE_API_KEY=
# COHERE_MODEL_PREF='command-r'

###########################################
######## Embedding API SELECTION ##########
###########################################
# Only used if you are using an LLM that does not natively support embedding (openai or Azure)
# EMBEDDING_ENGINE='openai'
# OPEN_AI_KEY=sk-xxxx
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'

# EMBEDDING_ENGINE='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002

# EMBEDDING_ENGINE='localai'
# EMBEDDING_BASE_PATH='http://localhost:8080/v1'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be

# EMBEDDING_ENGINE='ollama'
# EMBEDDING_BASE_PATH='http://host.docker.internal:11434'
# EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192

# EMBEDDING_ENGINE='lmstudio'
# EMBEDDING_BASE_PATH='https://host.docker.internal:1234/v1'
# EMBEDDING_MODEL_PREF='nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q4_0.gguf'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192

# EMBEDDING_ENGINE='cohere'
# COHERE_API_KEY=
# EMBEDDING_MODEL_PREF='embed-english-v3.0'

# EMBEDDING_ENGINE='voyageai'
# VOYAGEAI_API_KEY=
# EMBEDDING_MODEL_PREF='voyage-large-2-instruct'

# EMBEDDING_ENGINE='litellm'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
# EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
# LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
# LITE_LLM_API_KEY='sk-123abc'

###########################################
######## Vector Database Selection ########
###########################################
# Enable all below if you are using vector database: Chroma.
# VECTOR_DB="chroma"
# CHROMA_ENDPOINT='http://host.docker.internal:8000'
# CHROMA_API_HEADER="X-Api-Key"
# CHROMA_API_KEY="sk-123abc"

# Enable all below if you are using vector database: Pinecone.
# VECTOR_DB="pinecone"
# PINECONE_API_KEY=
# PINECONE_INDEX=

# Enable all below if you are using vector database: LanceDB.
# VECTOR_DB="lancedb"

# Enable all below if you are using vector database: Weaviate.
# VECTOR_DB="weaviate"
# WEAVIATE_ENDPOINT="http://localhost:8080"
# WEAVIATE_API_KEY=

# Enable all below if you are using vector database: Qdrant.
# VECTOR_DB="qdrant"
# QDRANT_ENDPOINT="http://localhost:6333"
# QDRANT_API_KEY=

# Enable all below if you are using vector database: Milvus.
# VECTOR_DB="milvus"
# MILVUS_ADDRESS="http://localhost:19530"
# MILVUS_USERNAME=
# MILVUS_PASSWORD=

# Enable all below if you are using vector database: Zilliz Cloud.
# VECTOR_DB="zilliz"
# ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
# ZILLIZ_API_TOKEN=api-token-here

# Enable all below if you are using vector database: Astra DB.
# VECTOR_DB="astra"
# ASTRA_DB_APPLICATION_TOKEN=
# ASTRA_DB_ENDPOINT=

###########################################
######## Audio Model Selection ############
###########################################
# (default) use built-in whisper-small model.
# WHISPER_PROVIDER="local"
# use openai hosted whisper model.
# WHISPER_PROVIDER="openai"
# OPEN_AI_KEY=sk-xxxxxxxx

###########################################
######## TTS/STT Model Selection ##########
###########################################
# TTS_PROVIDER="native"
# TTS_PROVIDER="openai"
# TTS_OPEN_AI_KEY=sk-example
# TTS_OPEN_AI_VOICE_MODEL=nova
# TTS_PROVIDER="elevenlabs"
# TTS_ELEVEN_LABS_KEY=
# TTS_ELEVEN_LABS_VOICE_MODEL=21m00Tcm4TlvDq8ikWAM # Rachel

# CLOUD DEPLOYMENT VARIABLES ONLY
# AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
# DISABLE_TELEMETRY="false"

###########################################
######## PASSWORD COMPLEXITY ##############
###########################################
# Enforce a password schema for your organization users.
# Documentation on how to use https://github.com/kamronbatman/joi-password-complexity
# Default is only 8 char minimum
# PASSWORDMINCHAR=8
# PASSWORDMAXCHAR=250
# PASSWORDLOWERCASE=1
# PASSWORDUPPERCASE=1
# PASSWORDNUMERIC=1
# PASSWORDSYMBOL=1
# PASSWORDREQUIREMENTS=4
###########################################
######## ENABLE HTTPS SERVER ##############
###########################################
# By enabling this and providing the path/filename for the key and cert,
# the server will use HTTPS instead of HTTP.
# ENABLE_HTTPS="true"
# HTTPS_CERT_PATH="sslcert/cert.pem"
# HTTPS_KEY_PATH="sslcert/key.pem"

###########################################
######## AGENT SERVICE KEYS ###############
###########################################
#------ SEARCH ENGINES -------
#=============================
#------ Google Search -------- https://programmablesearchengine.google.com/controlpanel/create
# AGENT_GSE_KEY=
# AGENT_GSE_CTX=
#------ Serper.dev ----------- https://serper.dev/
# AGENT_SERPER_DEV_KEY=
#------ Bing Search ----------- https://portal.azure.com/
# AGENT_BING_SEARCH_API_KEY=
#------ Serply.io ----------- https://serply.io/
# AGENT_SERPLY_API_KEY=