9 changes: 3 additions & 6 deletions BARE_METAL.md
@@ -5,7 +5,7 @@
> You are fully responsible for securing your deployment and data in this mode.
> **Any issues** experienced from bare-metal or non-containerized deployments will **not** be answered or supported.

- Here you can find the scripts and known working process to run AnythingLLM outside of a Docker container. This method of deployment is preferable for those using local LLMs and want native performance on their devices.
+ Here you can find the scripts and known working process to run AnythingLLM outside of a Docker container.

### Minimum Requirements
> [!TIP]
@@ -47,20 +47,17 @@ AnythingLLM is comprised of three main sections. The `frontend`, `server`, and `collector`.
2. Copy `frontend/dist` to `server/public` - `cp -R frontend/dist server/public`.
This should create a folder in `server` named `public` which contains a top level `index.html` file and various other files/folders.

- _(optional)_ Build native LLM support if using `native` as your LLM.
- `cd server && npx --no node-llama-cpp download`
-
3. Migrate and prepare your database file.
```
cd server && npx prisma generate --schema=./prisma/schema.prisma
cd server && npx prisma migrate deploy --schema=./prisma/schema.prisma
```

4. Boot the server in production
`cd server && NODE_ENV=production node index.js &`

5. Boot the collector in another process
`cd collector && NODE_ENV=production node index.js &`

AnythingLLM should now be running on `http://localhost:3001`!
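
Taken together, the post-change steps reduce to a short boot script. A minimal sketch, assuming it runs from the repo root and that the frontend bundle already exists at `frontend/dist`:

```
#!/usr/bin/env bash
set -euo pipefail

# Copy the built frontend into the server's public folder (step 2).
cp -R frontend/dist server/public

# Generate the Prisma client and apply pending migrations (step 3).
cd server
npx prisma generate --schema=./prisma/schema.prisma
npx prisma migrate deploy --schema=./prisma/schema.prisma

# Boot the server and the collector as background processes (steps 4-5).
NODE_ENV=production node index.js &
cd ../collector
NODE_ENV=production node index.js &
```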

9 changes: 2 additions & 7 deletions docker/Dockerfile
@@ -128,7 +128,7 @@ RUN yarn build && \
rm -rf /tmp/frontend-build
WORKDIR /app

- # Install server layer & build node-llama-cpp
+ # Install server layer
# Also pull and build collector deps (chromium issues prevent bad bindings)
FROM build AS backend-build
COPY ./server /app/server/
@@ -139,14 +139,9 @@ WORKDIR /app
# Install collector dependencies
COPY ./collector/ ./collector/
WORKDIR /app/collector
ENV PUPPETEER_DOWNLOAD_BASE_URL=https://storage.googleapis.com/chrome-for-testing-public
RUN yarn install --production --network-timeout 100000 && yarn cache clean

- # Compile Llama.cpp bindings for node-llama-cpp for this operating system.
- # Creates appropriate bindings for the OS
- USER root
- WORKDIR /app/server
- RUN npx --no node-llama-cpp download
WORKDIR /app
USER anythingllm

102 changes: 0 additions & 102 deletions frontend/src/components/LLMSelection/NativeLLMOptions/index.jsx

This file was deleted.

2 changes: 0 additions & 2 deletions frontend/src/hooks/useGetProvidersModels.js
@@ -4,7 +4,6 @@ import { useEffect, useState } from "react";
// Providers which cannot use this feature for workspace<>model selection
export const DISABLED_PROVIDERS = [
"azure",
"native",
"textgenwebui",
"generic-openai",
"bedrock",
@@ -47,7 +46,6 @@ const PROVIDER_DEFAULT_MODELS = {
fireworksai: [],
"nvidia-nim": [],
groq: [],
- native: [],
cohere: [
"command-r",
"command-r-plus",
13 changes: 0 additions & 13 deletions frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -38,7 +38,6 @@ import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
- import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
@@ -290,16 +289,6 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run xAI's powerful LLMs like Grok-2 and more.",
requiredConfig: ["XAIApiKey", "XAIModelPref"],
},

- {
-   name: "Native",
-   value: "native",
-   logo: AnythingLLMIcon,
-   options: (settings) => <NativeLLMOptions settings={settings} />,
-   description:
-     "Use a downloaded custom Llama model for chatting on this AnythingLLM instance.",
-   requiredConfig: [],
- },
];

export default function GeneralLLMPreference() {
Expand All @@ -312,7 +301,6 @@ export default function GeneralLLMPreference() {
const [selectedLLM, setSelectedLLM] = useState(null);
const [searchMenuOpen, setSearchMenuOpen] = useState(false);
const searchInputRef = useRef(null);
- const isHosted = window.location.hostname.includes("useanything.com");
const { t } = useTranslation();

const handleSubmit = async (e) => {
@@ -449,7 +437,6 @@
</div>
<div className="flex-1 pl-4 pr-2 flex flex-col gap-y-1 overflow-y-auto white-scrollbar pb-4">
{filteredLLMs.map((llm) => {
- if (llm.value === "native" && isHosted) return null;
return (
<LLMItem
key={llm.name}
@@ -105,13 +105,6 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: OllamaLogo,
},
- native: {
-   name: "Custom Llama Model",
-   description: [
-     "Your model and chats are only accessible on this AnythingLLM instance",
-   ],
-   logo: AnythingLLMIcon,
- },
togetherai: {
name: "TogetherAI",
description: [
@@ -32,7 +32,6 @@ import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions";
import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
- import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
import MistralOptions from "@/components/LLMSelection/MistralOptions";
@@ -247,14 +246,6 @@ const LLMS = [
options: (settings) => <XAILLMOptions settings={settings} />,
description: "Run xAI's powerful LLMs like Grok-2 and more.",
},
- {
-   name: "Native",
-   value: "native",
-   logo: AnythingLLMIcon,
-   options: (settings) => <NativeLLMOptions settings={settings} />,
-   description:
-     "Use a downloaded custom Llama model for chatting on this AnythingLLM instance.",
- },
];

export default function LLMPreference({
@@ -18,7 +18,7 @@ const FREE_FORM_LLM_SELECTION = ["bedrock", "azure", "generic-openai"];
const NO_MODEL_SELECTION = ["default", "huggingface"];

// Some providers we just fully disable for ease of use.
- const DISABLED_PROVIDERS = ["native"];
+ const DISABLED_PROVIDERS = [];

const LLM_DEFAULT = {
name: "System default",
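
Since the provider key is stripped from several lists across `frontend` and `server`, a quick spot-check that no stray references remain can help when rebasing downstream forks. A hypothetical one-liner, assuming a POSIX shell with GNU grep:

```
# List any lines still mentioning the removed provider key.
grep -rn '"native"' frontend/src server --include='*.js' --include='*.jsx'
```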
4 changes: 0 additions & 4 deletions server/models/systemSettings.js
@@ -490,10 +490,6 @@ const SystemSettings = {
GroqApiKey: !!process.env.GROQ_API_KEY,
GroqModelPref: process.env.GROQ_MODEL_PREF,

- // Native LLM Keys
- NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
- NativeLLMTokenLimit: process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT,

// HuggingFace Dedicated Inference
HuggingFaceLLMEndpoint: process.env.HUGGING_FACE_LLM_ENDPOINT,
HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
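
With these flags removed, any `NATIVE_LLM_*` variables lingering in an existing deployment's environment file are dead configuration. A hedged cleanup sketch; the `server/.env` path and GNU `sed` are assumptions, so adjust for your deployment:

```
# Back up first, since sed -i edits in place (GNU sed syntax assumed).
cp server/.env server/.env.bak
sed -i '/^NATIVE_LLM_MODEL_PREF=/d;/^NATIVE_LLM_MODEL_TOKEN_LIMIT=/d' server/.env
```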
1 change: 0 additions & 1 deletion server/package.json
@@ -62,7 +62,6 @@
"mssql": "^10.0.2",
"multer": "^1.4.5-lts.1",
"mysql2": "^3.9.8",
"node-llama-cpp": "^2.8.0",
"ollama": "^0.5.0",
"openai": "4.38.5",
"pg": "^8.11.5",