3 changes: 2 additions & 1 deletion .vscode/settings.json
@@ -1,8 +1,8 @@
{
"cSpell.words": [
"AIbitat",
"adoc",
"aibitat",
"AIbitat",
"anythingllm",
"Astra",
"Chartable",
@@ -20,6 +20,7 @@
"mbox",
"Milvus",
"Mintplex",
"moderations",
"Ollama",
"openai",
"opendocument",
2 changes: 1 addition & 1 deletion collector/package.json
@@ -36,7 +36,7 @@
"multer": "^1.4.5-lts.1",
"node-html-parser": "^6.1.13",
"officeparser": "^4.0.5",
"openai": "^3.2.1",
"openai": "4.38.5",
"pdf-parse": "^1.1.1",
"puppeteer": "~21.5.2",
"slugify": "^1.6.6",
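The collector's openai dependency moves from the 3.x SDK to an exact 4.38.5 pin. This is a breaking upgrade: the Configuration/OpenAIApi pair is gone and the client is constructed directly, which is the pattern the remaining files in this PR adopt. A minimal sketch of the construction change (the API-key lookup is a placeholder, not taken from the diff):

```js
// openai@^3.2.1 (removed): the client wrapped a Configuration object
// const { Configuration, OpenAIApi } = require("openai");
// const openai = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }));

// openai@4.38.5 (pinned here): construct the client directly
const { OpenAI: OpenAIApi } = require("openai");
const openai = new OpenAIApi({ apiKey: process.env.OPENAI_API_KEY });
```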
43 changes: 25 additions & 18 deletions collector/utils/WhisperProviders/OpenAiWhisper.js
@@ -2,13 +2,12 @@ const fs = require("fs");

class OpenAiWhisper {
constructor({ options }) {
const { Configuration, OpenAIApi } = require("openai");
const { OpenAI: OpenAIApi } = require("openai");
if (!options.openAiKey) throw new Error("No OpenAI API key was set.");

const config = new Configuration({
this.openai = new OpenAIApi({
apiKey: options.openAiKey,
});
this.openai = new OpenAIApi(config);
this.model = "whisper-1";
this.temperature = 0;
this.#log("Initialized.");
@@ -19,22 +18,30 @@ class OpenAiWhisper {
}

async processFile(fullFilePath) {
return await this.openai
.createTranscription(
fs.createReadStream(fullFilePath),
this.model,
undefined,
"text",
this.temperature
)
.then((res) => {
if (res.hasOwnProperty("data"))
return { content: res.data, error: null };
return { content: "", error: "No content was able to be transcribed." };
return await this.openai.audio.transcriptions
.create({
file: fs.createReadStream(fullFilePath),
model: this.model,
model: "whisper-1",
response_format: "text",
temperature: this.temperature,
})
.catch((e) => {
this.#log(`Could not get any response from openai whisper`, e.message);
return { content: "", error: e.message };
.then((response) => {
if (!response) {
return {
content: "",
error: "No content was able to be transcribed.",
};
}

return { content: response, error: null };
})
.catch((error) => {
this.#log(
`Could not get any response from openai whisper`,
error.message
);
return { content: "", error: error.message };
});
}
}
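The Whisper transcriber is ported to the v4 client: the positional createTranscription(file, model, prompt, format, temperature) call is replaced by audio.transcriptions.create({ ... }), and because response_format is "text" the v4 SDK resolves to the transcript string itself rather than an Axios-style { data } envelope, which is why the new code inspects the response directly. A self-contained sketch of the same call, assuming a hypothetical file path and the standard OPENAI_API_KEY environment variable:

```js
// Sketch of the openai@4.x transcription call used above; "audio.mp3" is a placeholder.
const fs = require("fs");
const { OpenAI } = require("openai");

async function transcribe(filePath) {
  const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
  // With response_format: "text", the v4 SDK returns the transcript as a plain string.
  const text = await client.audio.transcriptions.create({
    file: fs.createReadStream(filePath),
    model: "whisper-1",
    response_format: "text",
    temperature: 0,
  });
  return { content: text, error: null };
}

transcribe("audio.mp3").then(console.log).catch(console.error);
```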
753 changes: 414 additions & 339 deletions collector/yarn.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion docker/.env.example
@@ -64,7 +64,7 @@ GID='1000'

# LLM_PROVIDER='groq'
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama2-70b-4096
# GROQ_MODEL_PREF=llama3-8b-8192

# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
3 changes: 1 addition & 2 deletions frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
@@ -24,12 +24,11 @@ export default function GroqAiOptions({ settings }) {
</label>
<select
name="GroqModelPref"
defaultValue={settings?.GroqModelPref || "llama2-70b-4096"}
defaultValue={settings?.GroqModelPref || "llama3-8b-8192"}
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{[
"llama2-70b-4096",
"mixtral-8x7b-32768",
"llama3-8b-8192",
"llama3-70b-8192",
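Groq retired llama2-70b-4096, so the default model becomes llama3-8b-8192 in the Docker and server .env examples, in this dropdown's defaultValue, and in the static model list further down. A hedged sketch of how such a default is typically resolved at runtime (the function and fallback order are illustrative, not code from this PR):

```js
// Illustrative only: an explicit preference wins, then the env var,
// then the new llama3-8b-8192 default that replaces the retired llama2 model.
function resolveGroqModel(modelPreference = null) {
  return modelPreference ?? process.env.GROQ_MODEL_PREF ?? "llama3-8b-8192";
}
```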
@@ -86,7 +86,7 @@ function TogetherAiModelSelection({ settings }) {
<option
key={model.id}
value={model.id}
selected={settings?.OpenRouterModelPref === model.id}
selected={settings?.TogetherAiModelPref === model.id}
>
{model.name}
</option>
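This one-line change in what appears to be the TogetherAI options component fixes a copy-paste bug: the dropdown decided which option was selected by comparing against OpenRouterModelPref, so a saved TogetherAI preference never showed as selected. A hedged sketch of the corrected pattern (the component and prop names below are placeholders, not the file's actual variables):

```jsx
// Illustrative sketch only, not the component from this PR: the selected flag
// must compare against the TogetherAI preference, not OpenRouter's.
function ModelOptions({ settings, models }) {
  return models.map((model) => (
    <option
      key={model.id}
      value={model.id}
      selected={settings?.TogetherAiModelPref === model.id}
    >
      {model.name}
    </option>
  ));
}
```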
1 change: 0 additions & 1 deletion frontend/src/hooks/useGetProvidersModels.js
@@ -20,7 +20,6 @@ const PROVIDER_DEFAULT_MODELS = {
ollama: [],
togetherai: [],
groq: [
"llama2-70b-4096",
"mixtral-8x7b-32768",
"llama3-8b-8192",
"llama3-70b-8192",
2 changes: 1 addition & 1 deletion server/.env.example
@@ -61,7 +61,7 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea

# LLM_PROVIDER='groq'
# GROQ_API_KEY=gsk_abcxyz
# GROQ_MODEL_PREF=llama2-70b-4096
# GROQ_MODEL_PREF=llama3-8b-8192

# LLM_PROVIDER='generic-openai'
# GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
3 changes: 1 addition & 2 deletions server/package.json
@@ -57,8 +57,7 @@
"multer": "^1.4.5-lts.1",
"node-html-markdown": "^1.3.0",
"node-llama-cpp": "^2.8.0",
"openai": "^3.2.1",
"openai-latest": "npm:openai@latest",
"openai": "4.38.5",
"pinecone-client": "^1.1.0",
"pluralize": "^8.0.0",
"posthog-node": "^3.1.1",
81 changes: 38 additions & 43 deletions server/utils/AiProviders/genericOpenAi/index.js
@@ -1,21 +1,22 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { chatPrompt } = require("../../chats");
const { handleDefaultStreamResponse } = require("../../helpers/chat/responses");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");

class GenericOpenAiLLM {
constructor(embedder = null, modelPreference = null) {
const { Configuration, OpenAIApi } = require("openai");
const { OpenAI: OpenAIApi } = require("openai");
if (!process.env.GENERIC_OPEN_AI_BASE_PATH)
throw new Error(
"GenericOpenAI must have a valid base path to use for the api."
);

this.basePath = process.env.GENERIC_OPEN_AI_BASE_PATH;
const config = new Configuration({
basePath: this.basePath,
this.openai = new OpenAIApi({
baseURL: this.basePath,
apiKey: process.env.GENERIC_OPEN_AI_API_KEY ?? null,
});
this.openai = new OpenAIApi(config);
this.model =
modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
if (!this.model)
@@ -89,8 +90,8 @@ class GenericOpenAiLLM {
}

async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
const textResponse = await this.openai
.createChatCompletion({
const textResponse = await this.openai.chat.completions
.create({
model: this.model,
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
@@ -103,13 +104,12 @@
rawHistory
),
})
.then((json) => {
const res = json.data;
if (!res.hasOwnProperty("choices"))
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("GenericOpenAI chat: No results!");
if (res.choices.length === 0)
if (result.choices.length === 0)
throw new Error("GenericOpenAI chat: No results length!");
return res.choices[0].message.content;
return result.choices[0].message.content;
})
.catch((error) => {
throw new Error(
@@ -121,29 +121,26 @@
}

async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
const streamRequest = await this.openai.createChatCompletion(
{
model: this.model,
stream: true,
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
userPrompt: prompt,
chatHistory,
},
rawHistory
),
},
{ responseType: "stream" }
);
const streamRequest = await this.openai.chat.completions.create({
model: this.model,
stream: true,
temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
n: 1,
messages: await this.compressMessages(
{
systemPrompt: chatPrompt(workspace),
userPrompt: prompt,
chatHistory,
},
rawHistory
),
});
return streamRequest;
}

async getChatCompletion(messages = null, { temperature = 0.7 }) {
const { data } = await this.openai
.createChatCompletion({
const result = await this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
@@ -152,25 +149,23 @@
throw new Error(e.response.data.error.message);
});

if (!data.hasOwnProperty("choices")) return null;
return data.choices[0].message.content;
if (!result.hasOwnProperty("choices") || result.choices.length === 0)
return null;
return result.choices[0].message.content;
}

async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
const streamRequest = await this.openai.createChatCompletion(
{
model: this.model,
stream: true,
messages,
temperature,
},
{ responseType: "stream" }
);
const streamRequest = await this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
});
return streamRequest;
}

handleStream(response, stream, responseProps) {
return handleDefaultStreamResponse(response, stream, responseProps);
return handleDefaultStreamResponseV2(response, stream, responseProps);
}

// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
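The generic OpenAI-compatible provider gets the same migration: createChatCompletion becomes chat.completions.create, results carry choices directly instead of under res.data, basePath becomes baseURL, and streaming no longer needs an Axios { responseType: "stream" } option because with stream: true the v4 SDK returns an async iterable of chunks (hence the switch to handleDefaultStreamResponseV2). A minimal sketch of both call shapes against an OpenAI-compatible endpoint; the base URL, key, and model names are placeholders, not values from this PR:

```js
// Sketch of the openai@4.x call shapes this file migrates to.
const { OpenAI } = require("openai");

const client = new OpenAI({
  baseURL: process.env.GENERIC_OPEN_AI_BASE_PATH, // v3 used `basePath` inside a Configuration object
  apiKey: process.env.GENERIC_OPEN_AI_API_KEY,
});

async function demo() {
  // Non-streaming: the result itself has `choices`; there is no `.data` wrapper.
  const result = await client.chat.completions.create({
    model: "gpt-3.5-turbo",
    temperature: 0.7,
    messages: [{ role: "user", content: "Hello" }],
  });
  console.log(result.choices[0].message.content);

  // Streaming: with stream: true the SDK returns an async iterable of delta chunks.
  const stream = await client.chat.completions.create({
    model: "gpt-3.5-turbo",
    stream: true,
    messages: [{ role: "user", content: "Hello again" }],
  });
  for await (const chunk of stream) {
    process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
  }
}

demo().catch(console.error);
```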