This is a service provided by indexloc; do not enter any passwords.
Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -5,21 +5,30 @@ import paths from "@/utils/paths";
import { useTranslation } from "react-i18next";
import { Link, useParams } from "react-router-dom";

/**
 * These models do NOT support function calling
 * or do not support system prompts
 * and therefore are not supported for agents.
 * @param {string} provider - The AI provider.
 * @param {string} model - The model name.
 * @returns {boolean} Whether the model is supported for agents.
 */
function supportedModel(provider, model = "") {
  // Only OpenAI has known-unsupported models; every other provider passes.
  if (provider !== "openai") return true;

  // OpenAI models that lack function calling and/or system-prompt support.
  const unsupportedOpenAiModels = [
    "gpt-3.5-turbo-0301",
    "gpt-4-turbo-2024-04-09",
    "gpt-4-turbo",
    "o1-preview",
    "o1-preview-2024-09-12",
    "o1-mini",
    "o1-mini-2024-09-12",
  ];
  return !unsupportedOpenAiModels.includes(model);
}

export default function AgentModelSelection({
Expand Down
34 changes: 34 additions & 0 deletions server/utils/AiProviders/bedrock/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,20 @@ const { NativeEmbedder } = require("../../EmbeddingEngines/native");

// Docs: https://js.langchain.com/v0.2/docs/integrations/chat/bedrock_converse
class AWSBedrockLLM {
/**
 * Bedrock model IDs that do not support system prompts.
 * Not explicitly documented by AWS, but observed behavior: these models
 * ignore the system prompt in their responses and will crash when one is
 * provided, so system messages must be simulated via user/assistant pairs.
 * Extend this list as more such models are discovered or added; a
 * user-config may be warranted for custom Bedrock models.
 */
noSystemPromptModels = [
"amazon.titan-text-express-v1",
"amazon.titan-text-lite-v1",
"cohere.command-text-v14",
"cohere.command-light-text-v14",
];

constructor(embedder = null, modelPreference = null) {
if (!process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID)
throw new Error("No AWS Bedrock LLM profile id was set.");
Expand Down Expand Up @@ -59,6 +73,22 @@ class AWSBedrockLLM {

for (const chat of chats) {
if (!roleToMessageMap.hasOwnProperty(chat.role)) continue;

// When a model does not support system prompts, we need to handle it.
// We will add a new message that simulates the system prompt via a user message and AI response.
// This will allow the model to respond without crashing but we can still inject context.
if (
this.noSystemPromptModels.includes(this.model) &&
chat.role === "system"
) {
this.#log(
`Model does not support system prompts! Simulating system prompt via Human/AI message pairs.`
);
langchainChats.push(new HumanMessage({ content: chat.content }));
langchainChats.push(new AIMessage({ content: "Okay." }));
continue;
}

const MessageClass = roleToMessageMap[chat.role];
langchainChats.push(new MessageClass({ content: chat.content }));
}
Expand All @@ -78,6 +108,10 @@ class AWSBedrockLLM {
);
}

#log(text, ...args) {
console.log(`\x1b[32m[AWSBedrock]\x1b[0m ${text}`, ...args);
}

streamingEnabled() {
return "streamGetChatCompletion" in this;
}
Expand Down