2 changes: 1 addition & 1 deletion frontend/src/hooks/useGetProvidersModels.js
@@ -40,7 +40,7 @@ function groupModels(models) {
   }, {});
 }
 
-const groupedProviders = ["togetherai", "openai"];
+const groupedProviders = ["togetherai", "openai", "openrouter"];
 export default function useGetProviderModels(provider = null) {
   const [defaultModels, setDefaultModels] = useState([]);
   const [customModels, setCustomModels] = useState([]);
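For reference, providers listed in groupedProviders get their model lists grouped by organization, presumably for the model selection dropdown; adding "openrouter" matters here because the dynamically fetched OpenRouter catalog spans many vendors. A minimal sketch of that grouping, assuming each model record carries the organization field produced by the backend cache in this PR (a reconstruction from the hunk context above, not a verbatim copy of the file):

// Sketch of the grouping applied to providers in groupedProviders (assumed shape).
function groupModels(models) {
  return models.reduce((acc, model) => {
    acc[model.organization] = acc[model.organization] || [];
    acc[model.organization].push(model);
    return acc;
  }, {});
}

// e.g. [{ id: "openai/gpt-4o", organization: "Openai" }, { id: "anthropic/claude-3-opus", organization: "Anthropic" }]
// becomes { Openai: [...], Anthropic: [...] }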
3 changes: 2 additions & 1 deletion server/storage/models/.gitignore
@@ -1,3 +1,4 @@
 Xenova
 downloaded/*
-!downloaded/.placeholder
+!downloaded/.placeholder
+openrouter
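For context, the new openrouter entry keeps the provider's on-disk model cache out of version control. A minimal sketch of the files it covers, mirroring the cache paths set up in the OpenRouterLLM constructor below (the fallback path here is illustrative, not the provider's exact relative path):

const path = require("path");

// The two cache files the provider writes into the ignored folder.
const cacheFolder = process.env.STORAGE_DIR
  ? path.resolve(process.env.STORAGE_DIR, "models", "openrouter")
  : path.resolve(__dirname, "server/storage/models/openrouter"); // illustrative fallback
const cacheModelPath = path.resolve(cacheFolder, "models.json"); // cached model map
const cacheAtPath = path.resolve(cacheFolder, ".cached_at"); // last-fetch timestamp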
100 changes: 89 additions & 11 deletions server/utils/AiProviders/openRouter/index.js
@@ -5,20 +5,19 @@ const {
   writeResponseChunk,
   clientAbortedHandler,
 } = require("../../helpers/chat/responses");
-
-function openRouterModels() {
-  const { MODELS } = require("./models.js");
-  return MODELS || {};
-}
+const fs = require("fs");
+const path = require("path");
+const { safeJsonParse } = require("../../http");
 
 class OpenRouterLLM {
   constructor(embedder = null, modelPreference = null) {
     const { Configuration, OpenAIApi } = require("openai");
     if (!process.env.OPENROUTER_API_KEY)
       throw new Error("No OpenRouter API key was set.");
 
+    this.basePath = "https://openrouter.ai/api/v1";
     const config = new Configuration({
-      basePath: "https://openrouter.ai/api/v1",
+      basePath: this.basePath,
       apiKey: process.env.OPENROUTER_API_KEY,
       baseOptions: {
         headers: {
@@ -38,6 +37,81 @@ class OpenRouterLLM {
 
     this.embedder = !embedder ? new NativeEmbedder() : embedder;
     this.defaultTemp = 0.7;
+
+    const cacheFolder = path.resolve(
+      process.env.STORAGE_DIR
+        ? path.resolve(process.env.STORAGE_DIR, "models", "openrouter")
+        : path.resolve(__dirname, `../../../storage/models/openrouter`)
+    );
+    fs.mkdirSync(cacheFolder, { recursive: true });
+    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
+    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
   }
 
+  log(text, ...args) {
+    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+  }
+
+  async init() {
+    await this.#syncModels();
+    return this;
+  }
+
+  // This checks if the .cached_at file has a timestamp that is more than 1Week (in millis)
+  // from the current date. If it is, then we will refetch the API so that all the models are up
+  // to date.
+  #cacheIsStale() {
+    const MAX_STALE = 6.048e8; // 1 Week in MS
+    if (!fs.existsSync(this.cacheAtPath)) return true;
+    const now = Number(new Date());
+    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
+    return now - timestampMs > MAX_STALE;
+  }
+
+  // The OpenRouter model API has a lot of models, so we cache this locally in the directory
+  // as if the cache directory JSON file is stale or does not exist we will fetch from API and store it.
+  // This might slow down the first request, but we need the proper token context window
+  // for each model and this is a constructor property - so we can really only get it if this cache exists.
+  // We used to have this as a chore, but given there is an API to get the info - this makes little sense.
+  async #syncModels() {
+    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
+      return false;
+
+    this.log(
+      "Model cache is not present or stale. Fetching from OpenRouter API."
+    );
+    await fetch(`${this.basePath}/models`, {
+      method: "GET",
+      headers: {
+        "Content-Type": "application/json",
+      },
+    })
+      .then((res) => res.json())
+      .then(({ data = [] }) => {
+        const models = {};
+        data.forEach((model) => {
+          models[model.id] = {
+            id: model.id,
+            name: model.name,
+            organization:
+              model.id.split("/")[0].charAt(0).toUpperCase() +
+              model.id.split("/")[0].slice(1),
+            maxLength: model.context_length,
+          };
+        });
+        fs.writeFileSync(this.cacheModelPath, JSON.stringify(models), {
+          encoding: "utf-8",
+        });
+        fs.writeFileSync(this.cacheAtPath, String(Number(new Date())), {
+          encoding: "utf-8",
+        });
+        return models;
+      })
+      .catch((e) => {
+        console.error(e);
+        return {};
+      });
+    return;
+  }
+
   #appendContext(contextTexts = []) {
@@ -52,21 +126,26 @@
     );
   }
 
-  allModelInformation() {
-    return openRouterModels();
+  models() {
+    if (!fs.existsSync(this.cacheModelPath)) return {};
+    return safeJsonParse(
+      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
+      {}
+    );
   }
 
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }
 
   promptWindowLimit() {
-    const availableModels = this.allModelInformation();
+    const availableModels = this.models();
     return availableModels[this.model]?.maxLength || 4096;
   }
 
   async isValidChatCompletionModel(model = "") {
-    const availableModels = this.allModelInformation();
+    await this.#syncModels();
+    const availableModels = this.models();
     return availableModels.hasOwnProperty(model);
   }
 
@@ -343,5 +422,4 @@
 
 module.exports = {
   OpenRouterLLM,
-  openRouterModels,
 };
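A rough usage sketch of the new flow (hypothetical caller, not part of this diff): init() runs the cache sync, after which models(), promptWindowLimit(), and isValidChatCompletionModel() read from the on-disk JSON. The require path, the model id, and the assumption that the constructor stores modelPreference on this.model are illustrative.

const { OpenRouterLLM } = require("./server/utils/AiProviders/openRouter");

async function demo() {
  // Requires OPENROUTER_API_KEY to be set; the model id here is illustrative.
  const provider = await new OpenRouterLLM(null, "openai/gpt-4o").init();

  // models() reads the map that #syncModels cached to models.json, e.g.
  // { "openai/gpt-4o": { id, name, organization: "Openai", maxLength: ... }, ... }
  console.log(Object.keys(provider.models()).length, "models cached");

  // Context window for the selected model, falling back to 4096 when unknown.
  console.log(provider.promptWindowLimit());

  // Re-syncs the cache (refetching if older than one week) before validating.
  console.log(await provider.isValidChatCompletionModel("openai/gpt-4o"));
}

demo().catch(console.error);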