diff --git a/README.md b/README.md
index e4407a7bf6..1945c5e671 100644
--- a/README.md
+++ b/README.md
@@ -101,6 +101,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [xAI](https://x.ai/)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
+- [Gitee AI](https://ai.gitee.com/)
**Embedder models:**
diff --git a/frontend/src/components/LLMSelection/GiteeAIOptions/index.jsx b/frontend/src/components/LLMSelection/GiteeAIOptions/index.jsx
new file mode 100644
index 0000000000..5ef889d1bc
--- /dev/null
+++ b/frontend/src/components/LLMSelection/GiteeAIOptions/index.jsx
@@ -0,0 +1,99 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function GiteeAIOptions({ settings }) {
+ const [inputValue, setInputValue] = useState(settings?.GiteeAIApiKey);
+ const [GiteeAIApiKey, setGiteeAIApiKey] = useState(settings?.GiteeAIApiKey);
+
+  return (
+    <div className="flex gap-[36px] mt-1.5">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          API Key
+        </label>
+        <input
+          type="password"
+          name="GiteeAIApiKey"
+          className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+          placeholder="Gitee AI API Key"
+          defaultValue={settings?.GiteeAIApiKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+          onChange={(e) => setInputValue(e.target.value)}
+          onBlur={() => setGiteeAIApiKey(inputValue)}
+        />
+      </div>
+      {!settings?.credentialsOnly && (
+        <GiteeAIModelSelection settings={settings} apiKey={GiteeAIApiKey} />
+      )}
+    </div>
+  );
+}
+
+function GiteeAIModelSelection({ apiKey, settings }) {
+ const [models, setModels] = useState([]);
+ const [loading, setLoading] = useState(true);
+
+ useEffect(() => {
+ async function findCustomModels() {
+ if (!apiKey) {
+ setModels([]);
+ setLoading(true);
+ return;
+ }
+
+ setLoading(true);
+ const { models } = await System.customModels(
+ "giteeai",
+ typeof apiKey === "boolean" ? null : apiKey
+ );
+ setModels(models || []);
+ setLoading(false);
+ }
+
+ findCustomModels();
+ }, [apiKey]);
+
+  if (loading) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select
+          name="GiteeAIModelPref"
+          disabled={true}
+          className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            -- loading available models --
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select
+        name="GiteeAIModelPref"
+        required={true}
+        className="border-none bg-theme-settings-input-bg border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {models.map((model) => (
+          <option
+            key={model.id}
+            value={model.id}
+            selected={settings?.GiteeAIModelPref === model.id}
+          >
+            {model.name}
+          </option>
+        ))}
+      </select>
+    </div>
+  );
+}
diff --git a/frontend/src/media/llmprovider/giteeai.png b/frontend/src/media/llmprovider/giteeai.png
new file mode 100644
index 0000000000..a1ef5fdade
Binary files /dev/null and b/frontend/src/media/llmprovider/giteeai.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index f9aceec189..df13e7c480 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -32,6 +32,7 @@ import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
+import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -61,6 +62,7 @@ import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
+import GiteeAIOptions from "@/components/LLMSelection/GiteeAIOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -315,6 +317,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run xAI's powerful LLMs like Grok-2 and more.",
requiredConfig: ["XAIApiKey", "XAIModelPref"],
},
+ {
+    name: "Gitee AI",
+ value: "giteeai",
+ logo: GiteeAILogo,
+    options: (settings) => <GiteeAIOptions settings={settings} />,
+    description: "Run Gitee AI's powerful LLMs.",
+ requiredConfig: ["GiteeAIApiKey"],
+ },
];
export default function GeneralLLMPreference() {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index bc48209da9..dd171adfb7 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -38,6 +38,8 @@ import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import PGVectorLogo from "@/media/vectordbs/pgvector.png";
import DPAISLogo from "@/media/llmprovider/dpais.png";
+import GiteeAILogo from "@/media/llmprovider/giteeai.png";
+
import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
import { useNavigate } from "react-router-dom";
@@ -242,6 +244,13 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: DPAISLogo,
},
+ giteeai: {
+    name: "Gitee AI",
+ description: [
+      "Quickly experience powerful large language models and explore the open-source AI world ahead of others.",
+ ],
+ logo: GiteeAILogo,
+ },
};
export const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 02d97893a7..3637587c6a 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -27,6 +27,7 @@ import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
+import GiteeAILogo from "@/media/llmprovider/giteeai.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
@@ -55,6 +56,7 @@ import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
+import GiteeAIOptions from "@/components/LLMSelection/GiteeAIOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
@@ -263,6 +265,13 @@ const LLMS = [
options: (settings) => <XAILLMOptions settings={settings} />,
description: "Run xAI's powerful LLMs like Grok-2 and more.",
},
+ {
+ name: "Gitee AI",
+ value: "giteeai",
+ logo: GiteeAILogo,
+    options: (settings) => <GiteeAIOptions settings={settings} />,
+ description: "Run Gitee AI's powerful LLMs.",
+ },
];
export default function LLMPreference({
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 6baae1ddee..0a8e9799df 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -31,6 +31,7 @@ const ENABLED_PROVIDERS = [
"xai",
"nvidia-nim",
"gemini",
+ "giteeai",
// TODO: More agent support.
// "cohere", // Has tool calling and will need to build explicit support
// "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index a7a3e752c6..c5ea4342db 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -560,6 +560,10 @@ const SystemSettings = {
DeepSeekApiKey: !!process.env.DEEPSEEK_API_KEY,
DeepSeekModelPref: process.env.DEEPSEEK_MODEL_PREF,
+ // GiteeAI API Keys
+ GiteeAIApiKey: !!process.env.GITEE_AI_API_KEY,
+ GiteeAIModelPref: process.env.GITEE_AI_MODEL_PREF,
+
// APIPie LLM API Keys
ApipieLLMApiKey: !!process.env.APIPIE_LLM_API_KEY,
ApipieLLMModelPref: process.env.APIPIE_LLM_MODEL_PREF,
diff --git a/server/utils/AiProviders/giteeai/index.js b/server/utils/AiProviders/giteeai/index.js
new file mode 100644
index 0000000000..b37cf40667
--- /dev/null
+++ b/server/utils/AiProviders/giteeai/index.js
@@ -0,0 +1,313 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+ LLMPerformanceMonitor,
+} = require("../../helpers/chat/LLMPerformanceMonitor");
+const { v4: uuidv4 } = require("uuid");
+const { MODEL_MAP } = require("../modelMap");
+const {
+ writeResponseChunk,
+ clientAbortedHandler,
+} = require("../../helpers/chat/responses");
+
+class GiteeAILLM {
+ constructor(embedder = null, modelPreference = null) {
+ if (!process.env.GITEE_AI_API_KEY)
+ throw new Error("No Gitee AI API key was set.");
+ const { OpenAI: OpenAIApi } = require("openai");
+
+ this.openai = new OpenAIApi({
+ apiKey: process.env.GITEE_AI_API_KEY,
+ baseURL: "https://ai.gitee.com/v1",
+ });
+ this.model = modelPreference || process.env.GITEE_AI_MODEL_PREF || "";
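+    // Budget the context window across prompt sections:
+    // ~15% chat history, ~15% system prompt, ~70% user prompt + context.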
+ this.limits = {
+ history: this.promptWindowLimit() * 0.15,
+ system: this.promptWindowLimit() * 0.15,
+ user: this.promptWindowLimit() * 0.7,
+ };
+
+ this.embedder = embedder ?? new NativeEmbedder();
+ this.defaultTemp = 0.7;
+ this.log("Initialized with model:", this.model);
+ }
+
+ log(text, ...args) {
+ console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+ }
+
+ #appendContext(contextTexts = []) {
+ if (!contextTexts || !contextTexts.length) return "";
+ return (
+ "\nContext:\n" +
+ contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")
+ );
+ }
+
+ streamingEnabled() {
+ return "streamGetChatCompletion" in this;
+ }
+
+ static promptWindowLimit(modelName) {
+ // return MODEL_MAP.giteeai[modelName] ?? 8192;
+    // TODO: use external API data and cache it
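+    // A possible shape for this TODO (sketch, not wired up): pull the model
+    // list from the /v1/models endpoint, cache any reported context-window
+    // size keyed by model id, and keep 8192 as the fallback when the lookup
+    // misses or the request fails. Assumes Gitee AI exposes such a field.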
+ return 8192;
+ }
+
+ promptWindowLimit() {
+ // return MODEL_MAP.giteeai[this.model] ?? 8192;
+    // TODO: use external API data and cache it
+ return 8192;
+ }
+
+ async isValidChatCompletionModel(modelName = "") {
+ const models = await this.openai.models.list().catch(() => ({ data: [] }));
+ return models.data.some((model) => model.id === modelName);
+ }
+
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+ };
+ return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+ }
+
+ /**
+ * Parses and prepends reasoning from the response and returns the full text response.
+ * @param {Object} response
+ * @returns {string}
+ */
+ #parseReasoningFromResponse({ message }) {
+ let textResponse = message?.content;
+ if (
+ !!message?.reasoning_content &&
+ message.reasoning_content.trim().length > 0
+ )
+      textResponse = `<think>${message.reasoning_content}</think>${textResponse}`;
+ return textResponse;
+ }
+
+ async getChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `GiteeAI chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const result = await LLMPerformanceMonitor.measureAsyncFunction(
+ this.openai.chat.completions
+ .create({
+ model: this.model,
+ messages,
+ temperature,
+ })
+ .catch((e) => {
+ throw new Error(e.message);
+ })
+ );
+
+ if (
+ !result?.output?.hasOwnProperty("choices") ||
+ result?.output?.choices?.length === 0
+ )
+ throw new Error(
+ `Invalid response body returned from GiteeAI: ${JSON.stringify(result.output)}`
+ );
+
+ return {
+ textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
+ metrics: {
+ prompt_tokens: result.output.usage.prompt_tokens || 0,
+ completion_tokens: result.output.usage.completion_tokens || 0,
+ total_tokens: result.output.usage.total_tokens || 0,
+ outputTps: result.output.usage.completion_tokens / result.duration,
+ duration: result.duration,
+ },
+ };
+ }
+
+ async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `GiteeAI chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
+ this.openai.chat.completions.create({
+ model: this.model,
+ stream: true,
+ messages,
+ temperature,
+ }),
+ messages,
+ false
+ );
+
+ return measuredStreamRequest;
+ }
+
+ // TODO: This is a copy of the generic handleStream function in responses.js
+ // to specifically handle the GiteeAI reasoning model `reasoning_content` field.
+ // When or if ever possible, we should refactor this to be in the generic function.
+ handleStream(response, stream, responseProps) {
+ const { uuid = uuidv4(), sources = [] } = responseProps;
+ let hasUsageMetrics = false;
+ let usage = {
+ completion_tokens: 0,
+ };
+
+ return new Promise(async (resolve) => {
+ let fullText = "";
+ let reasoningText = "";
+
+ // Establish listener to early-abort a streaming response
+ // in case things go sideways or the user does not like the response.
+ // We preserve the generated text but continue as if chat was completed
+ // to preserve previously generated content.
+ const handleAbort = () => {
+ stream?.endMeasurement(usage);
+ clientAbortedHandler(resolve, fullText);
+ };
+ response.on("close", handleAbort);
+
+ try {
+ for await (const chunk of stream) {
+ const message = chunk?.choices?.[0];
+ const token = message?.delta?.content;
+ const reasoningToken = message?.delta?.reasoning_content;
+
+ if (
+ chunk.hasOwnProperty("usage") && // exists
+ !!chunk.usage && // is not null
+ Object.values(chunk.usage).length > 0 // has values
+ ) {
+ if (chunk.usage.hasOwnProperty("prompt_tokens")) {
+ usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
+ }
+
+ if (chunk.usage.hasOwnProperty("completion_tokens")) {
+ hasUsageMetrics = true; // to stop estimating counter
+ usage.completion_tokens = Number(chunk.usage.completion_tokens);
+ }
+ }
+
+ // Reasoning models will always return the reasoning text before the token text.
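+          // The reasoning text is wrapped in <think>...</think> markers so
+          // the frontend can separate the thought chain from the answer text.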
+ if (reasoningToken) {
+ // If the reasoning text is empty (''), we need to initialize it
+ // and send the first chunk of reasoning text.
+ if (reasoningText.length === 0) {
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+                textResponse: `<think>${reasoningToken}`,
+ close: false,
+ error: false,
+ });
+              reasoningText += `<think>${reasoningToken}`;
+ continue;
+ } else {
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+ textResponse: reasoningToken,
+ close: false,
+ error: false,
+ });
+ reasoningText += reasoningToken;
+ }
+ }
+
+ // If the reasoning text is not empty, but the reasoning token is empty
+ // and the token text is not empty we need to close the reasoning text and begin sending the token text.
+ if (!!reasoningText && !reasoningToken && token) {
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+              textResponse: `</think>`,
+ close: false,
+ error: false,
+ });
+            fullText += `${reasoningText}</think>`;
+ reasoningText = "";
+ }
+
+ if (token) {
+ fullText += token;
+ // If we never saw a usage metric, we can estimate them by number of completion chunks
+ if (!hasUsageMetrics) usage.completion_tokens++;
+ writeResponseChunk(response, {
+ uuid,
+ sources: [],
+ type: "textResponseChunk",
+ textResponse: token,
+ close: false,
+ error: false,
+ });
+ }
+
+ // LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
+ // Either way, the key `finish_reason` must be present to determine ending chunk.
+ if (
+ message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
+ message.finish_reason !== "" &&
+ message.finish_reason !== null
+ ) {
+ writeResponseChunk(response, {
+ uuid,
+ sources,
+ type: "textResponseChunk",
+ textResponse: "",
+ close: true,
+ error: false,
+ });
+ response.removeListener("close", handleAbort);
+ stream?.endMeasurement(usage);
+ resolve(fullText);
+ break; // Break streaming when a valid finish_reason is first encountered
+ }
+ }
+ } catch (e) {
+ console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
+ writeResponseChunk(response, {
+ uuid,
+ type: "abort",
+ textResponse: null,
+ sources: [],
+ close: true,
+ error: e.message,
+ });
+ stream?.endMeasurement(usage);
+ resolve(fullText); // Return what we currently have - if anything.
+ }
+ });
+ }
+
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+
+ async compressMessages(promptArgs = {}, rawHistory = []) {
+ const { messageArrayCompressor } = require("../../helpers/chat");
+ const messageArray = this.constructPrompt(promptArgs);
+ return await messageArrayCompressor(this, messageArray, rawHistory);
+ }
+}
+
+module.exports = {
+ GiteeAILLM,
+};
diff --git a/server/utils/AiProviders/modelMap.js b/server/utils/AiProviders/modelMap.js
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 6e069defd3..0a6b47c74e 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -828,6 +828,9 @@ ${this.getHistory({ to: route.to })
return new Providers.GeminiProvider({ model: config.model });
case "dpais":
return new Providers.DellProAiStudioProvider({ model: config.model });
+      case "giteeai":
+        return new Providers.GiteeAIProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index 5085fdcf5e..1310e1605a 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -244,6 +244,14 @@ class Provider {
apiKey: null,
...config,
});
+ case "giteeai":
+ return new ChatOpenAI({
+ configuration: {
+ baseURL: "https://ai.gitee.com/v1",
+ },
+ apiKey: process.env.GITEE_AI_API_KEY ?? null,
+ ...config,
+ });
default:
throw new Error(`Unsupported provider ${provider} for this task.`);
diff --git a/server/utils/agents/aibitat/providers/giteeai.js b/server/utils/agents/aibitat/providers/giteeai.js
new file mode 100644
index 0000000000..4967fc1c95
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/giteeai.js
@@ -0,0 +1,116 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+const { toValidNumber } = require("../../../http/index.js");
+
+class GiteeAIProvider extends InheritMultiple([Provider, UnTooled]) {
+ model;
+
+ constructor(config = {}) {
+ super();
+ const { model = "DeepSeek-R1" } = config;
+ this._client = new OpenAI({
+ baseURL: "https://ai.gitee.com/v1",
+ apiKey: process.env.GITEE_AI_API_KEY ?? null,
+ maxRetries: 3,
+ });
+ this.model = model;
+ this.verbose = true;
+    this.maxTokens = process.env.GITEE_AI_MAX_TOKENS
+      ? toValidNumber(process.env.GITEE_AI_MAX_TOKENS, 1024)
+      : 1024;
+ }
+
+ get client() {
+ return this._client;
+ }
+
+ async #handleFunctionCallChat({ messages = [] }) {
+ return await this.client.chat.completions
+ .create({
+ model: this.model,
+ temperature: 0,
+ messages,
+ max_tokens: this.maxTokens,
+ })
+ .then((result) => {
+ if (!result.hasOwnProperty("choices"))
+ throw new Error("GiteeAI: No results!");
+ if (result.choices.length === 0)
+ throw new Error("GiteeAI: No results length!");
+ return result.choices[0].message.content;
+ })
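+      // Errors are swallowed on purpose: a null return makes complete()
+      // fall back to a plain chat completion without tool calling.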
+ .catch((_) => {
+ return null;
+ });
+ }
+
+ /**
+ * Create a completion based on the received messages.
+ *
+ * @param messages A list of messages to send to the API.
+ * @param functions
+ * @returns The completion.
+ */
+ async complete(messages, functions = []) {
+ try {
+ let completion;
+ if (functions.length > 0) {
+ const { toolCall, text } = await this.functionCall(
+ messages,
+ functions,
+ this.#handleFunctionCallChat.bind(this)
+ );
+
+ if (toolCall !== null) {
+ this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+ this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+ return {
+ result: null,
+ functionCall: {
+ name: toolCall.name,
+ arguments: toolCall.arguments,
+ },
+ cost: 0,
+ };
+ }
+ completion = { content: text };
+ }
+
+ if (!completion?.content) {
+ this.providerLog(
+ "Will assume chat completion without tool call inputs."
+ );
+ const response = await this.client.chat.completions.create({
+ model: this.model,
+ messages: this.cleanMsgs(messages),
+ });
+ completion = response.choices[0].message;
+ }
+
+ // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
+ // from calling the exact same function over and over in a loop within a single chat exchange
+ // _but_ we should enable it to call previously used tools in a new chat interaction.
+ this.deduplicator.reset("runs");
+ return {
+ result: completion.content,
+ cost: 0,
+ };
+ } catch (error) {
+ throw error;
+ }
+ }
+
+ /**
+ * Get the cost of the completion.
+ *
+ * @param _usage The completion to get the cost for.
+ * @returns The cost of the completion.
+ */
+ getCost(_usage) {
+ return 0;
+ }
+}
+
+module.exports = GiteeAIProvider;
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index d8c174862e..18b41ff8b5 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -23,6 +23,7 @@ const NvidiaNimProvider = require("./nvidiaNim.js");
const PPIOProvider = require("./ppio.js");
const GeminiProvider = require("./gemini.js");
const DellProAiStudioProvider = require("./dellProAiStudio.js");
+const GiteeAIProvider = require("./giteeai.js");
module.exports = {
OpenAIProvider,
@@ -50,4 +51,5 @@ module.exports = {
PPIOProvider,
GeminiProvider,
DellProAiStudioProvider,
+ GiteeAIProvider,
};
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 915e5a59be..620757241a 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -199,6 +199,10 @@ class AgentHandler {
"Dell Pro AI Studio model must be set to use agents."
);
break;
+ case "giteeai":
+ if (!process.env.GITEE_AI_API_KEY)
+ throw new Error("GiteeAI API Key must be provided to use agents.");
+ break;
default:
throw new Error(
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index cff97ff888..916c768f3b 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -33,6 +33,7 @@ const SUPPORT_CUSTOM_MODELS = [
"gemini",
"ppio",
"dpais",
+ "giteeai",
];
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -84,6 +85,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getPPIOModels(apiKey);
case "dpais":
return await getDellProAiStudioModels(basePath);
+ case "giteeai":
+ return await getGiteeAIModels(apiKey);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -553,6 +556,31 @@ async function getDeepSeekModels(apiKey = null) {
return { models, error: null };
}
+async function getGiteeAIModels(apiKey = null) {
+ const { OpenAI: OpenAIApi } = require("openai");
+ const openai = new OpenAIApi({
+    apiKey: apiKey || process.env.GITEE_AI_API_KEY,
+ baseURL: "https://ai.gitee.com/v1",
+ });
+ const models = await openai.models
+ .list()
+ .then((results) => results.data)
+ .then((models) =>
+ models.map((model) => ({
+ id: model.id,
+ name: model.id,
+ organization: model.owned_by,
+ }))
+ )
+ .catch((e) => {
+ console.error(`GiteeAI:listModels`, e.message);
+ return [];
+ });
+
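+  // Persist a key that successfully listed models so later requests can
+  // reuse it without the user re-entering it.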
+  if (models.length > 0 && !!apiKey) process.env.GITEE_AI_API_KEY = apiKey;
+ return { models, error: null };
+}
+
async function getXAIModels(_apiKey = null) {
const { OpenAI: OpenAIApi } = require("openai");
const apiKey =
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 2017c618fa..e140049b44 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -206,6 +206,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "dpais":
const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
return new DellProAiStudioLLM(embedder, model);
+ case "giteeai":
+ const { GiteeAILLM } = require("../AiProviders/giteeai");
+ return new GiteeAILLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index b69c96417f..1bb7b732cb 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -672,6 +672,14 @@ const KEY_MAPPING = {
envKey: "PPIO_MODEL_PREF",
checks: [isNotEmpty],
},
+ GiteeAIApiKey: {
+ envKey: "GITEE_AI_API_KEY",
+ checks: [isNotEmpty],
+ },
+ GiteeAIModelPref: {
+ envKey: "GITEE_AI_MODEL_PREF",
+ checks: [isNotEmpty],
+ },
};
function isNotEmpty(input = "") {
@@ -780,6 +788,7 @@ function supportedLLM(input = "") {
"nvidia-nim",
"ppio",
"dpais",
+ "giteeai",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}