From f3163e12865adf00cd539a0204ff5f8697aecd4f Mon Sep 17 00:00:00 2001
From: shatfield4
Date: Fri, 4 Oct 2024 15:47:12 -0700
Subject: [PATCH 1/2] support openai o1 models

---
 server/utils/AiProviders/modelMap.js     |  4 ++++
 server/utils/AiProviders/openAi/index.js | 18 ++++++++++++++----
 server/utils/helpers/customModels.js     |  2 +-
 3 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/server/utils/AiProviders/modelMap.js b/server/utils/AiProviders/modelMap.js
index 99d78dc1425..84e480b317a 100644
--- a/server/utils/AiProviders/modelMap.js
+++ b/server/utils/AiProviders/modelMap.js
@@ -52,6 +52,10 @@ const MODEL_MAP = {
     "gpt-4-turbo-preview": 128_000,
     "gpt-4": 8_192,
     "gpt-4-32k": 32_000,
+    "o1-preview": 128_000,
+    "o1-preview-2024-09-12": 128_000,
+    "o1-mini": 128_000,
+    "o1-mini-2024-09-12": 128_000,
   },
   deepseek: {
     "deepseek-chat": 128_000,
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index b0e52dc2b98..8efe9917d4e 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -36,7 +36,8 @@ class OpenAiLLM {
   }
 
   streamingEnabled() {
-    return "streamGetChatCompletion" in this;
+    // o1 models do not support streaming
+    return !this.isO1Model() && "streamGetChatCompletion" in this;
   }
 
   static promptWindowLimit(modelName) {
@@ -98,8 +99,12 @@ class OpenAiLLM {
     userPrompt = "",
     attachments = [], // This is the specific attachment for only this prompt
   }) {
+    // o1 Models do not support the "system" role
+    // in order to combat this, we can use the "user" role as a replacement for now
+    // https://community.openai.com/t/o1-models-do-not-support-system-role-in-chat-completion/953880
+    const systemRole = this.isO1Model() ? "user" : "system";
     const prompt = {
-      role: "system",
+      role: systemRole,
       content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [
@@ -122,7 +127,8 @@ class OpenAiLLM {
       .create({
         model: this.model,
         messages,
-        temperature,
+        // o1 models only accept temperature 1
+        temperature: this.isO1Model() ? 1 : temperature,
       })
       .catch((e) => {
         throw new Error(e.message);
@@ -143,7 +149,8 @@ class OpenAiLLM {
       model: this.model,
       stream: true,
       messages,
-      temperature,
+      // o1 models only accept temperature 1
+      temperature: this.isO1Model() ? 1 : temperature,
     });
     return streamRequest;
   }
@@ -165,6 +172,9 @@ class OpenAiLLM {
     const messageArray = this.constructPrompt(promptArgs);
     return await messageArrayCompressor(this, messageArray, rawHistory);
   }
+  isO1Model() {
+    return this.model.startsWith("o1");
+  }
 }
 
 module.exports = {
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index f061d35ff1f..3535a128312 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -124,7 +124,7 @@ async function openAiModels(apiKey = null) {
   });
 
   const gpts = allModels
-    .filter((model) => model.id.startsWith("gpt"))
+    .filter((model) => model.id.startsWith("gpt") || model.id.startsWith("o1"))
     .filter(
       (model) => !model.id.includes("vision") && !model.id.includes("instruct")
     )

From cb5fbf059ebd0f51088f0e8ba0be2d165a361202 Mon Sep 17 00:00:00 2001
From: timothycarambat
Date: Tue, 15 Oct 2024 19:40:25 -0700
Subject: [PATCH 2/2] Prevent O1 use for agents

getter for isO1Model;
---
 .../AgentConfig/AgentModelSelection/index.jsx | 13 +++++++---
 server/utils/AiProviders/openAi/index.js      | 24 ++++++++++---------
 2 files changed, 23 insertions(+), 14 deletions(-)

diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
index 4e0a9592c14..a16e1689c06 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
@@ -6,12 +6,19 @@ import { useTranslation } from "react-i18next";
 import { Link, useParams } from "react-router-dom";
 
 // These models do NOT support function calling
+// and therefore are not supported for agents.
 function supportedModel(provider, model = "") {
   if (provider !== "openai") return true;
   return (
-    ["gpt-3.5-turbo-0301", "gpt-4-turbo-2024-04-09", "gpt-4-turbo"].includes(
-      model
-    ) === false
+    [
+      "gpt-3.5-turbo-0301",
+      "gpt-4-turbo-2024-04-09",
+      "gpt-4-turbo",
+      "o1-preview",
+      "o1-preview-2024-09-12",
+      "o1-mini",
+      "o1-mini-2024-09-12",
+    ].includes(model) === false
   );
 }
 
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index 8efe9917d4e..4f6bc2219b2 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -23,6 +23,14 @@ class OpenAiLLM {
     this.defaultTemp = 0.7;
   }
 
+  /**
+   * Check if the model is an o1 model.
+   * @returns {boolean}
+   */
+  get isO1Model() {
+    return this.model.startsWith("o1");
+  }
+
   #appendContext(contextTexts = []) {
     if (!contextTexts || !contextTexts.length) return "";
     return (
@@ -36,8 +44,8 @@ class OpenAiLLM {
   }
 
   streamingEnabled() {
-    // o1 models do not support streaming
-    return !this.isO1Model() && "streamGetChatCompletion" in this;
+    if (this.isO1Model) return false;
+    return "streamGetChatCompletion" in this;
   }
 
   static promptWindowLimit(modelName) {
@@ -102,9 +110,8 @@ class OpenAiLLM {
     // o1 Models do not support the "system" role
     // in order to combat this, we can use the "user" role as a replacement for now
     // https://community.openai.com/t/o1-models-do-not-support-system-role-in-chat-completion/953880
-    const systemRole = this.isO1Model() ? "user" : "system";
     const prompt = {
-      role: systemRole,
+      role: this.isO1Model ? "user" : "system",
       content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [
@@ -127,8 +134,7 @@ class OpenAiLLM {
       .create({
         model: this.model,
         messages,
-        // o1 models only accept temperature 1
-        temperature: this.isO1Model() ? 1 : temperature,
+        temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
       })
       .catch((e) => {
         throw new Error(e.message);
@@ -149,8 +155,7 @@ class OpenAiLLM {
       model: this.model,
       stream: true,
       messages,
-      // o1 models only accept temperature 1
-      temperature: this.isO1Model() ? 1 : temperature,
+      temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
     });
     return streamRequest;
   }
@@ -172,9 +177,6 @@ class OpenAiLLM {
     const messageArray = this.constructPrompt(promptArgs);
     return await messageArrayCompressor(this, messageArray, rawHistory);
   }
-  isO1Model() {
-    return this.model.startsWith("o1");
-  }
 }
 
 module.exports = {