Anthropic claude 2 support #305
Conversation
The PR seems to be adding support for a new language model provider, Anthropic, and also adding a new page for embedding preferences. Here are some suggestions:
```js
if (error) {
  // alert(`Failed to save LLM settings: ${error}`, "error");
  showToast(`Failed to save LLM settings: ${error}`, "error");
  return;
}
```
```js
const stepMapping = {
  "anthropic": 7,
  "default": 2,
};
goToStep(stepMapping[data.LLMProvider] || stepMapping.default);
```
```jsx
const ProviderOption = ({ name, value, link, description, image }) => (
  <LLMProviderOption
    name={name}
    value={value}
    link={link}
    description={description}
    checked={llmChoice === value}
    image={image}
    onClick={updateLLMChoice}
  />
);

// Usage
<ProviderOption
  name="OpenAI"
  value="openai"
  link="openai.com"
  description="The standard option for most non-commercial use. Provides both chat and embedding."
  image={OpenAiLogo}
/>
```
```jsx
const InputField = ({ label, type, name, placeholder, defaultValue, required = true }) => (
  <div className="flex flex-col w-60">
    <label className="text-white text-sm font-semibold block mb-4">
      {label}
    </label>
    <input
      type={type}
      name={name}
      className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
      placeholder={placeholder}
      defaultValue={defaultValue}
      required={required}
      autoComplete="off"
      spellCheck={false}
    />
  </div>
);

// Usage
<InputField
  label="API Key"
  type="password"
  name="OpenAiKey"
  placeholder="OpenAI API Key"
  defaultValue={settings?.OpenAiKey ? "*".repeat(20) : ""}
/>
```

The PR seems to be removing a large chunk of code related to API key management and adding new code for managing embedding preferences. Here are a few suggestions:
The manual `FormData` loop:

```js
const data = {};
const form = new FormData(e.target);
for (var [key, value] of form.entries()) data[key] = value;
```

can be replaced with:

```js
const data = Object.fromEntries(new FormData(e.target));
```
```js
const handleSubmit = async (e) => {
  e.preventDefault();
  setSaving(true);
  setError(null);
  const data = {};
  const form = new FormData(e.target);
  for (var [key, value] of form.entries()) data[key] = value;
  try {
    const { error } = await System.updateSystem(data);
    setError(error);
    setHasChanges(!!error);
  } catch (err) {
    setError(err.message);
    setHasChanges(true);
  } finally {
    setSaving(false);
  }
};
```
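Applying the `Object.fromEntries` suggestion from above, the same handler condenses to (a sketch):

```js
const handleSubmit = async (e) => {
  e.preventDefault();
  setSaving(true);
  setError(null);
  // Collect all named form fields into a plain object in one step.
  const data = Object.fromEntries(new FormData(e.target));
  try {
    const { error } = await System.updateSystem(data);
    setError(error);
    setHasChanges(!!error);
  } catch (err) {
    setError(err.message);
    setHasChanges(true);
  } finally {
    setSaving(false);
  }
};
```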
```jsx
const vectorDBOptions = [
  {
    name: "Chroma",
    value: "chroma",
    link: "trychroma.com",
    description: "Open source vector database you can host yourself or on the cloud.",
    image: ChromaLogo,
  },
  // ...other options
];

// In render
{vectorDBOptions.map((option) => (
  <VectorDBOption
    key={option.value}
    checked={vectorDB === option.value}
    onClick={updateVectorChoice}
    {...option}
  />
))}
```
```js
class AnthropicLLM {
  constructor(embedder = null) {
    if (!process.env.ANTHROPIC_API_KEY)
      throw new Error("No Anthropic API key was set.");
    this.anthropic = this.initializeAnthropicAI();
    // rest of the code...
  }

  initializeAnthropicAI() {
    const AnthropicAI = require("@anthropic-ai/sdk");
    return new AnthropicAI({
      apiKey: process.env.ANTHROPIC_API_KEY,
    });
  }
  // rest of the code...
}
```
```js
isValidChatModel(modelName = "") {
  const validModels = process.env.VALID_MODELS.split(",");
  return validModels.includes(modelName);
}
```
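Note that this helper throws a `TypeError` when `VALID_MODELS` is unset, since `split` is called on `undefined`, rather than failing validation cleanly. A guarded variant (a sketch; the `"claude-2"` fallback is an assumption, not from the PR):

```js
isValidChatModel(modelName = "") {
  // Treat an unset VALID_MODELS as "only the default model is allowed".
  const validModels = (process.env.VALID_MODELS || "claude-2").split(",");
  return validModels.includes(modelName);
}
```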
```js
async createCompletion(prompt) {
  const model = process.env.ANTHROPIC_MODEL_PREF || "claude-2";
  if (!this.isValidChatModel(model))
    throw new Error(
      `Anthropic chat: ${model} is not valid for chat completion!`
    );
  return await this.anthropic.completions
    .create({
      model, // use the validated model rather than re-hardcoding "claude-2"
      max_tokens_to_sample: 300,
      prompt,
    })
    .then((res) => {
      const { completion } = res;
      // The prompt instructs the model to wrap its answer in these tags.
      const re =
        /(?:<anythingllmresponse>)([\s\S]*)(?:<\/anythingllmresponse>)/;
      const response = completion.match(re)?.[1]?.trim();
      if (!response)
        throw new Error("Anthropic: No response could be parsed.");
      return { content: response, error: null };
    })
    .catch((e) => {
      return { content: null, error: e.message };
    });
}
```
```js
async sendChat(chatHistory = [], prompt, workspace = {}) {
  const { content, error } = await this.createCompletion(
    this.constructPrompt({
      systemPrompt: chatPrompt(workspace),
      userPrompt: prompt,
      chatHistory,
    })
  );
  if (error) throw new Error(error);
  return content;
}

async getChatCompletion(prompt = "", _opts = {}) {
  const { content, error } = await this.createCompletion(prompt);
  if (error) throw new Error(error);
  return content;
}
```
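`constructPrompt` is not shown in this excerpt. Judging from the regex in `createCompletion`, it presumably instructs Claude to wrap its answer in `<anythingllmresponse>` tags and formats the history into Anthropic's `Human:`/`Assistant:` turns. A hypothetical sketch — the exact wording, history shape, and structure are assumptions:

```js
constructPrompt({ systemPrompt = "", chatHistory = [], userPrompt = "" }) {
  // Anthropic's completions API expects alternating \n\nHuman:/\n\nAssistant:
  // turns, starting with Human and ending with an open Assistant turn.
  const history = chatHistory
    .map((turn) =>
      turn.role === "user"
        ? `\n\nHuman: ${turn.content}`
        : `\n\nAssistant: ${turn.content}`
    )
    .join("");
  return (
    `\n\nHuman: ${systemPrompt}\n` +
    `Wrap your entire reply in <anythingllmresponse></anythingllmresponse> tags.` +
    history +
    `\n\nHuman: ${userPrompt}\n\nAssistant:`
  );
}
```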
```js
function getEmbeddingEngineSelection() {
  const engineSelection = process.env.EMBEDDING_ENGINE;
  const Embedder = require(`../../EmbeddingEngines/${engineSelection}`);
  return new Embedder();
}
```
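One concrete improvement: as written, this helper will attempt to `require("../../EmbeddingEngines/undefined")` when `EMBEDDING_ENGINE` is unset. A guarded sketch (returning `null` here is an assumption about how callers handle a missing engine):

```js
function getEmbeddingEngineSelection() {
  const engineSelection = process.env.EMBEDDING_ENGINE;
  // Bail out early instead of require()-ing a nonexistent module path.
  if (!engineSelection) return null;
  const Embedder = require(`../../EmbeddingEngines/${engineSelection}`);
  return new Embedder();
}
```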
The PR seems to be adding support for a new language model provider, 'anthropic', in the environment configuration. This is a good addition, but there are a few things that could be improved. Here's a suggestion on how you could improve the code:

```diff
 ######## LLM API SELECTION ################
-# LLM_PROVIDER='openai'
+# Uncomment the provider you want to use
+# LLM_PROVIDER='openai'
 # OPEN_AI_KEY=
-# OPEN_MODEL_PREF='gpt-3.5-turbo'
+# OPEN_MODEL_PREF='gpt-3.5-turbo'

 # LLM_PROVIDER='azure'
 # AZURE_OPENAI_ENDPOINT=
 # AZURE_OPENAI_KEY=
 # OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
 # EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model.

+# LLM_PROVIDER='anthropic'
+# ANTHROPIC_API_KEY= # Add your anthropic API key here
+# ANTHROPIC_MODEL_PREF='claude-2'

 ######## Embedding Provider Selection ########
+# Uncomment the provider you want to use for embeddings
+# EMBEDDING_ENGINE='openai'
+# OPEN_AI_KEY= # Add your OpenAI key here
```

This way, it's clear to the user that they need to uncomment the provider they want to use and where to add their API keys.
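For context, here is a hypothetical sketch of how a `LLM_PROVIDER` switch might be consumed server-side. The helper name and `require` paths are assumptions for illustration, not this PR's actual code:

```js
// Hypothetical factory: map the LLM_PROVIDER env var to a provider class.
function getLLMProvider() {
  switch (process.env.LLM_PROVIDER) {
    case "anthropic": {
      const AnthropicLLM = require("../AiProviders/anthropic");
      return new AnthropicLLM();
    }
    case "azure": {
      const AzureOpenAiLLM = require("../AiProviders/azureOpenAi");
      return new AzureOpenAiLLM();
    }
    default: {
      const OpenAiLLM = require("../AiProviders/openAi");
      return new OpenAiLLM();
    }
  }
}
```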
Here are the suggested changes:

```diff
 "dependencies": {
-    "@anthropic-ai/sdk": "^0.8.1",
+    "@anthropic-ai/sdk": "0.8.1",
     "@azure/openai": "^1.0.0-beta.3",
     "@googleapis/youtube": "^9.0.0",
     "@pinecone-database/pinecone": "^0.1.6",
     "@prisma/client": "5.3.0",
     "@qdrant/js-client-rest": "^1.4.0",
     "archiver": "^5.3.1",
     "bcrypt": "^5.1.0",
     "body-parser": "^1.20.2",
     "uuid-apikey": "^1.5.3",
     "vectordb": "0.1.12",
     "weaviate-ts-client": "^1.4.0"
   },
   "devDependencies": {
@@ -59,4 +60,4 @@
     "nodemon": "^2.0.22",
     "prettier": "^2.4.1"
   }
-}
\ No newline at end of file
+}
```
move embedding selector to general util

* WIP Anthropic support for chat, chat and query w/context
* Add onboarding support for Anthropic
* cleanup
* fix Anthropic answer parsing
resolves #157
- Moves LLMs and Embedding into two different classes with inheritance where applicable (see the sketch after this list)
- New Anthropic class to support calls made
- Anthropic response parser using XML tags as suggested by docs
- Onboarding support and flow change depending on LLM selection
- Sidebar item for embedding provider - empty if not needed
- Remove LegacySettings folder
- Simplify chat interface inside of vector stores
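A minimal sketch of what the described LLM/Embedder split could look like. The class and method names below are assumptions for illustration; the PR's actual interfaces are not shown in this excerpt:

```js
// Hypothetical base shapes: concrete providers such as AnthropicLLM would
// implement the LLM side, while embedding engines implement the other.
class BaseLLM {
  async sendChat(chatHistory = [], prompt, workspace = {}) {
    throw new Error("sendChat must be implemented by the provider.");
  }
  async getChatCompletion(prompt = "", opts = {}) {
    throw new Error("getChatCompletion must be implemented by the provider.");
  }
}

class BaseEmbedder {
  async embedTextInput(input) {
    throw new Error("embedTextInput must be implemented by the engine.");
  }
}
```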