# anything-llm/server/.env.example
SERVER_PORT=3001
CACHE_VECTORS="true"
JWT_SECRET="my-random-string-for-seeding" # Please generate a random string at least 12 characters long.
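# Tip: any sufficiently long random string works for JWT_SECRET; one quick (illustrative) way
# to generate one is the openssl CLI:
#   openssl rand -base64 24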
###########################################
######## LLM API SELECTION ################
###########################################
# LLM_PROVIDER='openai'
# OPEN_AI_KEY=
# OPEN_MODEL_PREF='gpt-3.5-turbo'
# LLM_PROVIDER='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
# EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
# LLM_PROVIDER='anthropic'
# ANTHROPIC_API_KEY=sk-ant-xxxx
# ANTHROPIC_MODEL_PREF='claude-2'
# LLM_PROVIDER='lmstudio'
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096
# LLM_PROVIDER='localai'
# LOCAL_AI_BASE_PATH='http://localhost:8080/v1'
# LOCAL_AI_MODEL_PREF='luna-ai-llama2'
# LOCAL_AI_MODEL_TOKEN_LIMIT=4096
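# Quick sanity check for the self-hosted providers above (LM Studio / LocalAI): both expose an
# OpenAI-compatible API, so listing the available models confirms the base path is reachable.
# Adjust the host/port to match your own server; this is only an illustrative example.
#   curl http://localhost:8080/v1/models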
###########################################
######## Embedding API SELECTION ##########
###########################################
# Only used if your selected LLM provider does not natively support embedding (OpenAI and Azure do).
# EMBEDDING_ENGINE='openai'
# OPEN_AI_KEY=sk-xxxx
# EMBEDDING_ENGINE='azure'
# AZURE_OPENAI_ENDPOINT=
# AZURE_OPENAI_KEY=
# EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
# EMBEDDING_ENGINE='localai'
# EMBEDDING_BASE_PATH='http://localhost:8080/v1'
# EMBEDDING_MODEL_PREF='text-embedding-ada-002'
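# Quick sanity check for a LocalAI embedder (illustrative; assumes your LocalAI instance has a
# model configured under the same name you set in EMBEDDING_MODEL_PREF):
#   curl http://localhost:8080/v1/embeddings \
#     -H "Content-Type: application/json" \
#     -d '{"model": "text-embedding-ada-002", "input": "hello world"}'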
###########################################
######## Vector Database Selection ########
###########################################
# Enable all below if you are using vector database: Chroma.
# VECTOR_DB="chroma"
# CHROMA_ENDPOINT='http://localhost:8000'
# CHROMA_API_HEADER="X-Api-Key"
# CHROMA_API_KEY="sk-123abc"
# Enable all below if you are using vector database: Pinecone.
# VECTOR_DB="pinecone"
# PINECONE_ENVIRONMENT=
# PINECONE_API_KEY=
# PINECONE_INDEX=
# Enable all below if you are using vector database: LanceDB.
VECTOR_DB="lancedb"
# Enable all below if you are using vector database: Weaviate.
# VECTOR_DB="weaviate"
# WEAVIATE_ENDPOINT="http://localhost:8080"
# WEAVIATE_API_KEY=
# Enable all below if you are using vector database: Qdrant.
# VECTOR_DB="qdrant"
# QDRANT_ENDPOINT="http://localhost:6333"
# QDRANT_API_KEY=
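# Note: LanceDB is the default above because it runs embedded inside the server and needs no
# endpoint or API key. For the remote options, a quick reachability check can save debugging
# time, e.g. (adjust hosts/ports to your deployment; these are only the common defaults):
#   curl http://localhost:8000/api/v1/heartbeat   # Chroma
#   curl http://localhost:6333/collections        # Qdrant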
# CLOUD DEPLOYMENT VARIABLES ONLY
# AUTH_TOKEN="hunter2" # This is the password to your application if hosting remotely.
# STORAGE_DIR= # absolute filesystem path with no trailing slash
# NO_DEBUG="true"
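# Example of supplying this file to a containerized deployment (the image name and port mapping
# below are illustrative; use whatever image or compose setup you actually deploy, keeping the
# published port in sync with SERVER_PORT above):
#   docker run -d -p 3001:3001 --env-file .env mintplexlabs/anythingllm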