Merge branch 'master' of github.com:Mintplex-Labs/anything-llm into render

Author: timothycarambat
Date:   2024-07-19 15:11:56 -07:00
Commit: 4ff57eee4b

8 changed files with 74 additions and 89 deletions

View File

@@ -4,7 +4,6 @@
 **/server/storage/*.db
 **/server/storage/lancedb
 **/collector/hotdir/**
-**/collector/v-env/**
 **/collector/outputs/**
 **/node_modules/
 **/dist/
@@ -13,6 +12,7 @@
 **/.env
 **/.env.*
 **/bundleinspector.html
-!docker/.env.example
-!frontend/.env.production
 **/tmp/**
+**/.log
+!docker/.env.example
+!frontend/.env.production

View File

@@ -27,7 +27,9 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
     apt-get install -yq --no-install-recommends nodejs && \
     curl -LO https://github.com/yarnpkg/yarn/releases/download/v1.22.19/yarn_1.22.19_all.deb \
     && dpkg -i yarn_1.22.19_all.deb \
-    && rm yarn_1.22.19_all.deb
+    && rm yarn_1.22.19_all.deb && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
 
 # Create a group and user with specific UID and GID
 RUN groupadd -g "$ARG_GID" anythingllm && \
@@ -85,7 +87,9 @@ RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
     apt-get install -yq --no-install-recommends nodejs && \
     curl -LO https://github.com/yarnpkg/yarn/releases/download/v1.22.19/yarn_1.22.19_all.deb \
     && dpkg -i yarn_1.22.19_all.deb \
-    && rm yarn_1.22.19_all.deb
+    && rm yarn_1.22.19_all.deb && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
 
 # Create a group and user with specific UID and GID
 RUN groupadd -g "$ARG_GID" anythingllm && \
@@ -112,56 +116,58 @@ RUN echo "Running common build flow of AnythingLLM image for all architectures"
 USER anythingllm
 WORKDIR /app
 
-# Install frontend dependencies
-FROM build AS frontend-deps
-COPY ./frontend/package.json ./frontend/yarn.lock ./frontend/
+# Install & Build frontend layer
+FROM build AS frontend-build
+COPY --chown=anythingllm:anythingllm ./frontend /app/frontend/
 WORKDIR /app/frontend
 RUN yarn install --network-timeout 100000 && yarn cache clean
+RUN yarn build && \
+    cp -r dist /tmp/frontend-build && \
+    rm -rf * && \
+    cp -r /tmp/frontend-build dist && \
+    rm -rf /tmp/frontend-build
 WORKDIR /app
 
-# Install server dependencies
-FROM build AS server-deps
-COPY ./server/package.json ./server/yarn.lock ./server/
+# Install server layer & build node-llama-cpp
+FROM build AS server-build
+COPY ./server /app/server/
 WORKDIR /app/server
 RUN yarn install --production --network-timeout 100000 && yarn cache clean
 WORKDIR /app
 
 # Compile Llama.cpp bindings for node-llama-cpp for this operating system.
+# Creates appropriate bindings for the OS
 USER root
 WORKDIR /app/server
 RUN npx --no node-llama-cpp download
 WORKDIR /app
 USER anythingllm
 
-# Build the frontend
-FROM frontend-deps AS build-stage
-COPY ./frontend/ ./frontend/
-WORKDIR /app/frontend
-RUN yarn build && yarn cache clean && rm -rf node_modules
-WORKDIR /app
-
-# Setup the server
-FROM server-deps AS production-stage
-COPY --chown=anythingllm:anythingllm ./server/ ./server/
-
-# Copy built static frontend files to the server public directory
-COPY --chown=anythingllm:anythingllm --from=build-stage /app/frontend/dist ./server/public
-
-# Copy the collector
-COPY --chown=anythingllm:anythingllm ./collector/ ./collector/
-
-# Install collector dependencies
+# Build collector deps (this also downloads proper chrome for collector in /app/.cache so that needs to be
+# transferred properly in prod-build stage.
+FROM build AS collector-build
+COPY ./collector /app/collector
 WORKDIR /app/collector
 ENV PUPPETEER_DOWNLOAD_BASE_URL=https://storage.googleapis.com/chrome-for-testing-public
 RUN yarn install --production --network-timeout 100000 && yarn cache clean
-
-# Migrate and Run Prisma against known schema
-WORKDIR /app/server
-RUN npx prisma generate --schema=./prisma/schema.prisma && \
-    npx prisma migrate deploy --schema=./prisma/schema.prisma
 WORKDIR /app
 
+FROM build AS production-build
+WORKDIR /app
+# Copy the server
+COPY --chown=anythingllm:anythingllm --from=server-build /app/server/ /app/server/
+# Copy built static frontend files to the server public directory
+COPY --chown=anythingllm:anythingllm --from=frontend-build /app/frontend/dist /app/server/public
+# Copy the collector
+COPY --chown=anythingllm:anythingllm --from=collector-build /app/collector/ /app/collector/
+COPY --chown=anythingllm:anythingllm --from=collector-build /app/.cache/puppeteer /app/.cache/puppeteer
+
+# No longer needed? (deprecated)
+# WORKDIR /app/server
+# RUN npx prisma generate --schema=./prisma/schema.prisma && \
+#   npx prisma migrate deploy --schema=./prisma/schema.prisma
+# WORKDIR /app
+
 # Setup the environment
 ENV NODE_ENV=production
 ENV ANYTHING_LLM_RUNTIME=docker
@@ -174,4 +180,5 @@ HEALTHCHECK --interval=1m --timeout=10s --start-period=1m \
   CMD /bin/bash /usr/local/bin/docker-healthcheck.sh || exit 1
 
 # Run the server
+# CMD ["sh", "-c", "tail -f /dev/null"] # For development: keep container open
 ENTRYPOINT ["/bin/bash", "/usr/local/bin/docker-entrypoint.sh"]

View File

@@ -1,5 +1,3 @@
-version: "3.9"
-
 name: anythingllm
 
 networks:
@@ -8,8 +6,7 @@ networks:
 
 services:
   anything-llm:
-    container_name: anything-llm
-    image: anything-llm:latest
+    container_name: anythingllm
     build:
      context: ../.
      dockerfile: ./docker/Dockerfile

View File

@@ -168,8 +168,8 @@ export default function ActiveWorkspaces() {
   isInWorkspaceSettings && workspace.slug === slug
     ? "#46C8FF"
     : gearHover[workspace.id]
-    ? "#FFFFFF"
-    : "#A7A8A9"
+      ? "#FFFFFF"
+      : "#A7A8A9"
 }
 weight="bold"
 className="h-[20px] w-[20px]"

View File

@@ -9,9 +9,8 @@ export default function LiveSyncToggle({ enabled = false, onToggle }) {
   const [status, setStatus] = useState(enabled);
 
   async function toggleFeatureFlag() {
-    const updated = await System.experimentalFeatures.liveSync.toggleFeature(
-      !status
-    );
+    const updated =
+      await System.experimentalFeatures.liveSync.toggleFeature(!status);
     if (!updated) {
       showToast("Failed to update status of feature.", "error", {
         clear: true,

View File

@@ -1,23 +1,23 @@
 const MODELS = {
-  "sonar-small-chat": {
-    id: "sonar-small-chat",
-    name: "sonar-small-chat",
-    maxLength: 16384,
+  "llama-3-sonar-small-32k-online\\*": {
+    id: "llama-3-sonar-small-32k-online\\*",
+    name: "llama-3-sonar-small-32k-online\\*",
+    maxLength: 28000,
   },
-  "sonar-small-online": {
-    id: "sonar-small-online",
-    name: "sonar-small-online",
-    maxLength: 12000,
+  "llama-3-sonar-small-32k-chat": {
+    id: "llama-3-sonar-small-32k-chat",
+    name: "llama-3-sonar-small-32k-chat",
+    maxLength: 32768,
   },
-  "sonar-medium-chat": {
-    id: "sonar-medium-chat",
-    name: "sonar-medium-chat",
-    maxLength: 16384,
+  "llama-3-sonar-large-32k-online\\*": {
+    id: "llama-3-sonar-large-32k-online\\*",
+    name: "llama-3-sonar-large-32k-online\\*",
+    maxLength: 28000,
   },
-  "sonar-medium-online": {
-    id: "sonar-medium-online",
-    name: "sonar-medium-online",
-    maxLength: 12000,
+  "llama-3-sonar-large-32k-chat": {
+    id: "llama-3-sonar-large-32k-chat",
+    name: "llama-3-sonar-large-32k-chat",
+    maxLength: 32768,
   },
   "llama-3-8b-instruct": {
     id: "llama-3-8b-instruct",
@@ -29,26 +29,11 @@ const MODELS = {
     name: "llama-3-70b-instruct",
     maxLength: 8192,
   },
-  "codellama-70b-instruct": {
-    id: "codellama-70b-instruct",
-    name: "codellama-70b-instruct",
-    maxLength: 16384,
-  },
-  "mistral-7b-instruct": {
-    id: "mistral-7b-instruct",
-    name: "mistral-7b-instruct",
-    maxLength: 16384,
-  },
   "mixtral-8x7b-instruct": {
     id: "mixtral-8x7b-instruct",
     name: "mixtral-8x7b-instruct",
     maxLength: 16384,
   },
-  "mixtral-8x22b-instruct": {
-    id: "mixtral-8x22b-instruct",
-    name: "mixtral-8x22b-instruct",
-    maxLength: 16384,
-  },
 };
 
 module.exports.MODELS = MODELS;
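
For orientation, a minimal sketch of how this static map might be consumed downstream (the lookup helper and its 4096 fallback are hypothetical, not part of this diff):

// Hypothetical consumer of the MODELS map above; the fallback is an assumption.
const { MODELS } = require("./models");

function contextWindowFor(modelId) {
  return MODELS[modelId]?.maxLength ?? 4096; // assumed default for unknown ids
}

console.log(contextWindowFor("llama-3-sonar-large-32k-chat")); // 32768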

View File

@@ -1,12 +1,9 @@
-| Model                    | Parameter Count | Context Length | Model Type      |
-| :----------------------- | :-------------- | :------------- | :-------------- |
-| `sonar-small-chat`        | 7B              | 16384          | Chat Completion |
-| `sonar-small-online`      | 7B              | 12000          | Chat Completion |
-| `sonar-medium-chat`       | 8x7B            | 16384          | Chat Completion |
-| `sonar-medium-online`     | 8x7B            | 12000          | Chat Completion |
-| `llama-3-8b-instruct`     | 8B              | 8192           | Chat Completion |
-| `llama-3-70b-instruct`    | 70B             | 8192           | Chat Completion |
-| `codellama-70b-instruct`  | 70B             | 16384          | Chat Completion |
-| `mistral-7b-instruct` [1] | 7B              | 16384          | Chat Completion |
-| `mixtral-8x7b-instruct`   | 8x7B            | 16384          | Chat Completion |
-| `mixtral-8x22b-instruct`  | 8x22B           | 16384          | Chat Completion |
+| Model                              | Parameter Count | Context Length | Model Type      |
+| :--------------------------------- | :-------------- | :------------- | :-------------- |
+| `llama-3-sonar-small-32k-online`\* | 8B              | 28,000         | Chat Completion |
+| `llama-3-sonar-small-32k-chat`     | 8B              | 32,768         | Chat Completion |
+| `llama-3-sonar-large-32k-online`\* | 70B             | 28,000         | Chat Completion |
+| `llama-3-sonar-large-32k-chat`     | 70B             | 32,768         | Chat Completion |
+| `llama-3-8b-instruct`              | 8B              | 8,192          | Chat Completion |
+| `llama-3-70b-instruct`             | 70B             | 8,192          | Chat Completion |
+| `mixtral-8x7b-instruct`            | 8x7B            | 16,384         | Chat Completion |

View File

@@ -8,7 +8,7 @@
 // copy outputs into the export in ../models.js
 // Update the date below if you run this again because Perplexity added new models.
-// Last Collected: Apr 25, 2024
+// Last Collected: Jul 19, 2024
 
 import fs from "fs";
@@ -23,7 +23,7 @@ function parseChatModels() {
       .slice(1, -1)
       .map((text) => text.trim());
     model = model.replace(/`|\s*\[\d+\]\s*/g, "");
-    const maxLength = Number(contextLength.replace(/\s*\[\d+\]\s*/g, ""));
+    const maxLength = Number(contextLength.replace(/[^\d]/g, ""));
     if (model && maxLength) {
       models[model] = {
         id: model,
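
The regex change in the final hunk is what keeps the parser working against the refreshed docs table: context lengths are now written with thousands separators, which Number() cannot parse directly. A quick illustration with a value from the table above:

// Old pattern only stripped footnote markers like " [1]", so the comma survived:
Number("32,768".replace(/\s*\[\d+\]\s*/g, "")); // NaN
// New pattern removes every non-digit character before converting:
Number("32,768".replace(/[^\d]/g, "")); // 32768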