Merged
28 changes: 1 addition & 27 deletions server/utils/AiProviders/anthropic/index.js
@@ -1,5 +1,4 @@
 const { v4 } = require("uuid");
-const { chatPrompt } = require("../../chats");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -33,7 +32,7 @@ class AnthropicLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -110,31 +109,6 @@ class AnthropicLLM {
     }
   }

-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.isValidChatCompletionModel(this.model))
-      throw new Error(
-        `Anthropic chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const messages = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        userPrompt: prompt,
-        chatHistory,
-      },
-      rawHistory
-    );
-
-    const streamRequest = await this.anthropic.messages.stream({
-      model: this.model,
-      max_tokens: 4096,
-      system: messages[0].content, // Strip out the system message
-      messages: messages.slice(1), // Pop off the system message
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-    });
-    return streamRequest;
-  }
-
   async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.isValidChatCompletionModel(this.model))
       throw new Error(
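The streamingEnabled() change above is the load-bearing part of this file: a provider now advertises streaming by implementing streamGetChatCompletion alone, with no separate streamChat wrapper. Below is a minimal sketch of how a caller might gate on that check; the helper name and wiring are illustrative assumptions, and only the provider method shapes come from this diff.

// Hypothetical caller, not code from this PR: only streamingEnabled(),
// streamGetChatCompletion(), and getChatCompletion() are taken from the diff.
async function runCompletion(LLMConnector, messages, temperature = 0.7) {
  if (LLMConnector.streamingEnabled()) {
    // Provider streams via streamGetChatCompletion; the returned stream is
    // consumed elsewhere (e.g. by the response-chunk helpers imported above).
    return await LLMConnector.streamGetChatCompletion(messages, { temperature });
  }
  // Non-streaming fallback: one blocking completion call.
  return await LLMConnector.getChatCompletion(messages, { temperature });
}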
63 changes: 1 addition & 62 deletions server/utils/AiProviders/azureOpenAi/index.js
@@ -1,5 +1,4 @@
 const { AzureOpenAiEmbedder } = require("../../EmbeddingEngines/azureOpenAi");
-const { chatPrompt } = require("../../chats");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -45,7 +44,7 @@ class AzureOpenAiLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   // Sure the user selected a proper value for the token limit
@@ -82,66 +81,6 @@ class AzureOpenAiLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.model)
-      throw new Error(
-        "No OPEN_MODEL_PREF ENV defined. This must the name of a deployment on your Azure account for an LLM chat model like GPT-3.5."
-      );
-
-    const messages = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        userPrompt: prompt,
-        chatHistory,
-      },
-      rawHistory
-    );
-    const textResponse = await this.openai
-      .getChatCompletions(this.model, messages, {
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-      })
-      .then((res) => {
-        if (!res.hasOwnProperty("choices"))
-          throw new Error("AzureOpenAI chat: No results!");
-        if (res.choices.length === 0)
-          throw new Error("AzureOpenAI chat: No results length!");
-        return res.choices[0].message.content;
-      })
-      .catch((error) => {
-        console.log(error);
-        throw new Error(
-          `AzureOpenAI::getChatCompletions failed with: ${error.message}`
-        );
-      });
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.model)
-      throw new Error(
-        "No OPEN_MODEL_PREF ENV defined. This must the name of a deployment on your Azure account for an LLM chat model like GPT-3.5."
-      );
-
-    const messages = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        userPrompt: prompt,
-        chatHistory,
-      },
-      rawHistory
-    );
-    const stream = await this.openai.streamChatCompletions(
-      this.model,
-      messages,
-      {
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-      }
-    );
-    return stream;
-  }
-
   async getChatCompletion(messages = [], { temperature = 0.7 }) {
     if (!this.model)
       throw new Error(
53 changes: 1 addition & 52 deletions server/utils/AiProviders/gemini/index.js
@@ -1,4 +1,3 @@
-const { chatPrompt } = require("../../chats");
 const {
   writeResponseChunk,
   clientAbortedHandler,
@@ -48,7 +47,7 @@ class GeminiLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -118,32 +117,6 @@ class GeminiLLM {
     return allMessages;
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.isValidChatCompletionModel(this.model))
-      throw new Error(
-        `Gemini chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const compressedHistory = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        chatHistory,
-      },
-      rawHistory
-    );
-
-    const chatThread = this.gemini.startChat({
-      history: this.formatMessages(compressedHistory),
-    });
-    const result = await chatThread.sendMessage(prompt);
-    const response = result.response;
-    const responseText = response.text();
-
-    if (!responseText) throw new Error("Gemini: No response could be parsed.");
-
-    return responseText;
-  }
-
   async getChatCompletion(messages = [], _opts = {}) {
     if (!this.isValidChatCompletionModel(this.model))
       throw new Error(
@@ -165,30 +138,6 @@ class GeminiLLM {
     return responseText;
   }

-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!this.isValidChatCompletionModel(this.model))
-      throw new Error(
-        `Gemini chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const compressedHistory = await this.compressMessages(
-      {
-        systemPrompt: chatPrompt(workspace),
-        chatHistory,
-      },
-      rawHistory
-    );
-
-    const chatThread = this.gemini.startChat({
-      history: this.formatMessages(compressedHistory),
-    });
-    const responseStream = await chatThread.sendMessageStream(prompt);
-    if (!responseStream.stream)
-      throw new Error("Could not stream response stream from Gemini.");
-
-    return responseStream.stream;
-  }
-
   async streamGetChatCompletion(messages = [], _opts = {}) {
     if (!this.isValidChatCompletionModel(this.model))
       throw new Error(
52 changes: 1 addition & 51 deletions server/utils/AiProviders/genericOpenAi/index.js
@@ -1,5 +1,4 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -53,7 +52,7 @@ class GenericOpenAiLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   // Ensure the user set a value for the token limit
@@ -89,55 +88,6 @@ class GenericOpenAiLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("GenericOpenAI chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("GenericOpenAI chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `GenericOpenAI::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const result = await this.openai.chat.completions
       .create({
62 changes: 1 addition & 61 deletions server/utils/AiProviders/groq/index.js
@@ -1,5 +1,4 @@
 const { NativeEmbedder } = require("../../EmbeddingEngines/native");
-const { chatPrompt } = require("../../chats");
 const {
   handleDefaultStreamResponseV2,
 } = require("../../helpers/chat/responses");
@@ -38,7 +37,7 @@ class GroqLLM {
   }

   streamingEnabled() {
-    return "streamChat" in this && "streamGetChatCompletion" in this;
+    return "streamGetChatCompletion" in this;
   }

   promptWindowLimit() {
@@ -91,65 +90,6 @@ class GroqLLM {
     return { safe: true, reasons: [] };
   }

-  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `Groq chat: ${this.model} is not valid for chat completion!`
-      );
-
-    const textResponse = await this.openai.chat.completions
-      .create({
-        model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-        n: 1,
-        messages: await this.compressMessages(
-          {
-            systemPrompt: chatPrompt(workspace),
-            userPrompt: prompt,
-            chatHistory,
-          },
-          rawHistory
-        ),
-      })
-      .then((result) => {
-        if (!result.hasOwnProperty("choices"))
-          throw new Error("GroqAI chat: No results!");
-        if (result.choices.length === 0)
-          throw new Error("GroqAI chat: No results length!");
-        return result.choices[0].message.content;
-      })
-      .catch((error) => {
-        throw new Error(
-          `GroqAI::createChatCompletion failed with: ${error.message}`
-        );
-      });
-
-    return textResponse;
-  }
-
-  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
-    if (!(await this.isValidChatCompletionModel(this.model)))
-      throw new Error(
-        `GroqAI:streamChat: ${this.model} is not valid for chat completion!`
-      );
-
-    const streamRequest = await this.openai.chat.completions.create({
-      model: this.model,
-      stream: true,
-      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
-      n: 1,
-      messages: await this.compressMessages(
-        {
-          systemPrompt: chatPrompt(workspace),
-          userPrompt: prompt,
-          chatHistory,
-        },
-        rawHistory
-      ),
-    });
-    return streamRequest;
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
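Across all five providers the same two wrappers are dropped. For reference, the deleted sendChat methods were roughly this composition of calls that remain on every provider. This is a sketch only: the compressMessages and getChatCompletion signatures are copied from the removed code above, while the helper name, argument shape, and 0.7 default are assumptions.

// Hypothetical replacement for the removed sendChat wrappers; not code from this PR.
async function blockingChat(
  LLMConnector,
  { systemPrompt, prompt, chatHistory = [], rawHistory = [] }
) {
  // Compress system prompt, user prompt, and history into the provider's
  // context window, much as the deleted wrappers did internally.
  const messages = await LLMConnector.compressMessages(
    { systemPrompt, userPrompt: prompt, chatHistory },
    rawHistory
  );
  // A single non-streaming completion; for the providers shown here this
  // resolves to the response text.
  return await LLMConnector.getChatCompletion(messages, { temperature: 0.7 });
}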