1 change: 1 addition & 0 deletions docker/.env.example
@@ -27,6 +27,7 @@ GID='1000'

# LLM_PROVIDER='lmstudio'
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
# LMSTUDIO_MODEL_PREF='Loaded from Chat UI' # this is a bug in LMStudio 0.2.17
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096

# LLM_PROVIDER='localai'
80 changes: 80 additions & 0 deletions frontend/src/components/LLMSelection/LMStudioOptions/index.jsx
@@ -1,7 +1,14 @@
import { useEffect, useState } from "react";
import { Info } from "@phosphor-icons/react";
import paths from "@/utils/paths";
import System from "@/models/system";

export default function LMStudioOptions({ settings, showAlert = false }) {
const [basePathValue, setBasePathValue] = useState(
settings?.LMStudioBasePath
);
const [basePath, setBasePath] = useState(settings?.LMStudioBasePath);

return (
<div className="w-full flex flex-col">
{showAlert && (
@@ -35,8 +42,11 @@ export default function LMStudioOptions({ settings, showAlert = false }) {
required={true}
autoComplete="off"
spellCheck={false}
onChange={(e) => setBasePathValue(e.target.value)}
onBlur={() => setBasePath(basePathValue)}
/>
</div>
<LMStudioModelSelection settings={settings} basePath={basePath} />
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Token context window
@@ -57,3 +67,73 @@ export default function LMStudioOptions({ settings, showAlert = false }) {
</div>
);
}

function LMStudioModelSelection({ settings, basePath = null }) {
const [customModels, setCustomModels] = useState([]);
const [loading, setLoading] = useState(true);

useEffect(() => {
async function findCustomModels() {
if (!basePath || !basePath.includes("/v1")) {
setCustomModels([]);
setLoading(false);
return;
}
setLoading(true);
const { models } = await System.customModels("lmstudio", null, basePath);
setCustomModels(models || []);
setLoading(false);
}
findCustomModels();
}, [basePath]);

if (loading || customModels.length == 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="LMStudioModelPref"
disabled={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
{basePath?.includes("/v1")
? "-- loading available models --"
: "-- waiting for URL --"}
</option>
</select>
</div>
);
}

return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-4">
Chat Model Selection
</label>
<select
name="LMStudioModelPref"
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{customModels.length > 0 && (
<optgroup label="Your loaded models">
{customModels.map((model) => {
return (
<option
key={model.id}
value={model.id}
selected={settings.LMStudioModelPref === model.id}
>
{model.id}
</option>
);
})}
</optgroup>
)}
</select>
</div>
);
}
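
The new `LMStudioModelSelection` dropdown is populated through `System.customModels("lmstudio", null, basePath)`, which (per the server helper added further down) lists models from LM Studio's OpenAI-compatible `/v1/models` endpoint. As a hedged illustration of that round trip, here is a minimal sketch of querying the endpoint directly; the localhost URL and the response shape are assumptions based on the OpenAI-compatible API, not code from this PR.

```js
// Sketch only: list model ids straight from an LM Studio instance.
// Assumes an OpenAI-compatible /v1/models route and a reachable base path.
async function listLMStudioModels(basePath = "http://localhost:1234/v1") {
  const res = await fetch(`${basePath.replace(/\/+$/, "")}/models`);
  if (!res.ok) throw new Error(`LM Studio responded with ${res.status}`);
  const body = await res.json();
  // Expected OpenAI-compatible shape: { data: [{ id: "<model id>", ... }] }
  return body.data.map((model) => model.id);
}

// listLMStudioModels().then((ids) => console.log(ids));
```

Note the component only attempts the fetch once the base path contains `/v1`, mirroring the guard in `findCustomModels` above.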
1 change: 1 addition & 0 deletions server/.env.example
@@ -24,6 +24,7 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea

# LLM_PROVIDER='lmstudio'
# LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
# LMSTUDIO_MODEL_PREF='Loaded from Chat UI' # this is a bug in LMStudio 0.2.17
# LMSTUDIO_MODEL_TOKEN_LIMIT=4096

# LLM_PROVIDER='localai'
1 change: 1 addition & 0 deletions server/models/systemSettings.js
@@ -137,6 +137,7 @@ const SystemSettings = {
? {
LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,
LMStudioTokenLimit: process.env.LMSTUDIO_MODEL_TOKEN_LIMIT,
LMStudioModelPref: process.env.LMSTUDIO_MODEL_PREF,

// For embedding credentials when lmstudio is selected.
OpenAiKey: !!process.env.OPEN_AI_KEY,
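
Exposing `LMStudioModelPref` here is what lets the frontend pre-select a previously saved model: the `settings` prop consumed by `LMStudioOptions` above is assumed to carry these keys, so `selected={settings.LMStudioModelPref === model.id}` can match against it. An illustrative (not repo-generated) shape of that object when LM Studio is the active provider:

```js
// Example values only; field names follow the keys returned by SystemSettings.
const settings = {
  LMStudioBasePath: "http://your-server:1234/v1",
  LMStudioTokenLimit: 4096,
  LMStudioModelPref: "TheBloke/Mistral-7B-Instruct-v0.2-GGUF", // added by this PR
};
```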
11 changes: 8 additions & 3 deletions server/utils/AiProviders/lmStudio/index.js
@@ -12,9 +12,14 @@ class LMStudioLLM {
basePath: process.env.LMSTUDIO_BASE_PATH?.replace(/\/+$/, ""), // here is the URL to your LMStudio instance
});
this.lmstudio = new OpenAIApi(config);
// When using LMStudios inference server - the model param is not required so
// we can stub it here. LMStudio can only run one model at a time.
this.model = "model-placeholder";

// Prior to LMStudio 0.2.17 the `model` param was not required and any value passed
// into that field would work. In 0.2.17 LMStudio introduced multi-model chat,
// which has a bug that reports the server model id as "Loaded from Chat UI",
// and any other value will crash inference. So until this is patched we try to
// fetch `/models` and have the user set the id, or fall back to "Loaded from Chat UI",
// which does not impact users on <v0.2.17 and will keep working once the bug is fixed.
this.model = process.env.LMSTUDIO_MODEL_PREF || "Loaded from Chat UI";
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
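
To make the fallback concrete, below is a hedged sketch (using the same `openai` v3 SDK this file already depends on) of how the resolved model id flows into an actual chat completion request. If `LMSTUDIO_MODEL_PREF` is unset, the "Loaded from Chat UI" placeholder is sent, which pre-0.2.17 servers ignore and 0.2.17 accepts because of the bug described in the comment above.

```js
// Sketch, not the class implementation: the resolved model id is passed
// straight through as the `model` field of the completion request.
const { Configuration, OpenAIApi } = require("openai");

const lmstudio = new OpenAIApi(
  new Configuration({
    basePath: process.env.LMSTUDIO_BASE_PATH?.replace(/\/+$/, ""),
  })
);
const model = process.env.LMSTUDIO_MODEL_PREF || "Loaded from Chat UI";

async function ask(question) {
  const { data } = await lmstudio.createChatCompletion({
    model,
    messages: [{ role: "user", content: question }],
  });
  return data.choices[0].message.content;
}
```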
25 changes: 25 additions & 0 deletions server/utils/helpers/customModels.js
@@ -10,6 +10,7 @@ const SUPPORT_CUSTOM_MODELS = [
"mistral",
"perplexity",
"openrouter",
"lmstudio",
];

async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -33,6 +34,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getPerplexityModels();
case "openrouter":
return await getOpenRouterModels();
case "lmstudio":
return await getLMStudioModels(basePath);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -81,6 +84,28 @@ async function localAIModels(basePath = null, apiKey = null) {
return { models, error: null };
}

async function getLMStudioModels(basePath = null) {
try {
const { Configuration, OpenAIApi } = require("openai");
const config = new Configuration({
basePath: basePath || process.env.LMSTUDIO_BASE_PATH,
});
const openai = new OpenAIApi(config);
const models = await openai
.listModels()
.then((res) => res.data.data)
.catch((e) => {
console.error(`LMStudio:listModels`, e.message);
return [];
});

return { models, error: null };
} catch (e) {
console.error(`LMStudio:getLMStudioModels`, e.message);
return { models: [], error: "Could not fetch LMStudio Models" };
}
}

async function ollamaAIModels(basePath = null) {
let url;
try {
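
A quick usage sketch for the new provider case: calling the helper with `"lmstudio"` returns the same `{ models, error }` shape the frontend destructures. The require path, export, and model id below are assumptions for illustration only.

```js
// Assumed to be exported from server/utils/helpers/customModels.js.
const { getCustomModels } = require("./customModels");

async function demo() {
  const { models, error } = await getCustomModels(
    "lmstudio",
    null, // LM Studio needs no API key
    "http://localhost:1234/v1"
  );
  if (error) return console.error(error); // "Could not fetch LMStudio Models"
  // e.g. [{ id: "TheBloke/Mistral-7B-Instruct-v0.2-GGUF", object: "model" }]
  console.log(models.map((m) => m.id));
}

demo();
```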
4 changes: 4 additions & 0 deletions server/utils/helpers/updateENV.js
@@ -59,6 +59,10 @@ const KEY_MAPPING = {
envKey: "LMSTUDIO_BASE_PATH",
checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
},
LMStudioModelPref: {
envKey: "LMSTUDIO_MODEL_PREF",
checks: [],
},
LMStudioTokenLimit: {
envKey: "LMSTUDIO_MODEL_TOKEN_LIMIT",
checks: [nonZero],
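
For context on why an empty `checks` array is enough here: a hedged illustration (not the repo's actual `updateENV` implementation) of how a `KEY_MAPPING` entry is typically consumed. With no checks, any submitted `LMStudioModelPref` value is written straight through to `LMSTUDIO_MODEL_PREF`.

```js
// Illustration only. Assumes each check returns an error string when the
// value is invalid and a falsy value when it is acceptable.
function applySetting(keyMapping, label, value) {
  const mapping = keyMapping[label];
  if (!mapping) return { error: `${label} is not a supported setting.` };

  for (const check of mapping.checks) {
    const error = check(value);
    if (error) return { error };
  }

  process.env[mapping.envKey] = value;
  return { error: null };
}

// applySetting(KEY_MAPPING, "LMStudioModelPref", "my-model-id")
// -> sets process.env.LMSTUDIO_MODEL_PREF, since `checks` is empty.
```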