9 changes: 9 additions & 0 deletions amber.yaml
@@ -4,9 +4,15 @@ secrets:
- name: AMBER_ANOTHERWEBSERVICE_NJF
sha256: 6b2b7498466801ea974e10a19de5c8c9deb5bc95ec98992a46a291d02a1f7b60
cipher: 7d7725504623b21f90a0fada17d05ecaf0e1bc4f709baabc9f325dd4f1d0091c97d32a5aa77534e9ce0b678797418701474a018b7ff0535bc7cd3e8ccfcf57e4760c512ff6e3082c4e94999beabe148898b1a4fdcba26746b292455f
- name: AMBER_FLUENT_ANTHROPIC_KEY_01
sha256: 8f66c5f92b94d9228e9d32b30a29f3848fcd4ad9f4ef1ec4d153559be015c3e9
cipher: da52538627a1a409458d749f43bf74138fd9aa9b0a8dc9e441ef15035aaae84f4b111cfa894ee9462231f90401177db64dc72c2c7dd8c38f79c36e61a10f592a4fcb1b973220c35854370ec57fdc62886d87398480cdd01aa75b7913712633147628606c9ad832b8b2e837937b7f6e70fd4e1615a23b3d72e5a26d2838c6e03728430032a4b8cbe16a3be07a98136cc191d3a56709a2b23efc76e066
- name: AMBER_FLUENT_GROQ_API_KEY_01
sha256: 89f7aeec36590e47a2eab221b2381dd70343ae36b13e8e17a9f6dc62f1cb80be
cipher: 6ff9c99f465f1354cc8a107d5cbbd1165be08dd1c0a77b67bbb22dd4866dcb2a2bb48622edeb0bfcc1f3b100b1a83871e6ea90dde44ee9b4909641af7bfd69790b45518884bb686cc22045441ba48d668d87b3893869bc25bf1c21777d2c132322349f387b8730b3
- name: AMBER_FLUENT_MISTRAL_KEY_01
sha256: 9731bc831969cfa36c45cbd7fb6b757c9cfbba91a96a64a847f9b6205d84fd2a
cipher: 1c0bd5f0a66ee85221bbd179cf342b191105e36413a90d3740a6e5c5ef716939df7144ebb1a1bbb9eb068664c76c335bc455c6bfe6b3ebb7bc40fdcea97cb4e4ce46eef06d95f127ecafe3a985a0ca56
- name: AMBER_FLUENT_OPEN_API_KEY_01
sha256: 50955d905232e8fb90d164e87fbf4a3fd49186a3220ef1b3197bf64725340483
cipher: 04705361f04fa1e21d36b444ca36a8adfcf749564625cc3752bf3908a96aa00168683fe6855c9d14f20d082e3a9fa57e7493a1540c3932697c7242c8be5e0cbb4612b19b90933f6661e62eaa57349d54713f0d32e943d2ce6c6fdcdcaa350ba7ac7b04
@@ -19,6 +25,9 @@ secrets:
- name: AMBER_FLUENT_SESSION_ID_01
sha256: d215bada471ed79a9ec9baf5e8d23a57c72aa195e37e81bea3050690340a2dd9
cipher: 14d063487e9602ab97aae558ec5f2c2528ec667e5fbce9caca0c2586baf6cd466b49e90355fd19d67b19323cd37156679d1497684021c40ebc70dd
- name: AMBER_FLUENT_ZEP_MEMORY_KEY_01
sha256: 136bb9b51f44dc40238834475695f11c40970fccdeb4ab27766e6f7a405c32f1
cipher: 7b5fc863ce222f5baae691a2dae429026f373e2f6729300902dca3ec147bb86ecea0115e4e0427e5ae50911ae032e7bc910c89d4a97b3807564b593b0d8dd1082eb8123a8c6d75b4e8cc4907d1baea92d1cc0098926a92573a759e0037f03fe41df653fad556301e81b734e2f1e7d2923ad75e1a5f1d818f7aeda805aa88b533b2e7b21d785d99182bf436936011c64e4063be5f609e0ba57c43d456cee39e3f3853b368cae7b2138cf75a87d08129cf99a7a3e1ef222f24bad87b992b9ee8e3c57721a481
- name: AMBER_LEONARDO_AI_KINO_XL_MODEL_ID
sha256: 3b582f348b95a61ddbaa7ac3167a3f04f8e5e3178f0000f84ca8f0391f8aa1c1
cipher: f48d253696fb03637462505e578cc8b8f759f24fe3b6a3c579237c1288c61d280dc457baf44dd39764aa195230bf748a0f824e81bdd00f918ab610128f235ba4591d69f7bdc4fd1347c43d54f8440b5447c29775
104 changes: 102 additions & 2 deletions fluent_cli/config.json
@@ -13,7 +13,30 @@
"openAIApiKey": "AMBER_FLUENT_OPEN_API_KEY_01",
"searchApiKey": "AMBER_FLUENT_SEARCHAPI_KEY_ID_01",
"allowImageUploads": true,
"systemMessage": "You are a helpful assistant"
"systemMessage": "You are a helpful assistant",
"openAIToolAgent_0": {
"systemMessage": "You are a helpful assistant"
}
},
"timeout_ms": 50000
},
{
"name": "GPT4ImageUpload",
"protocol": "http",
"hostname": "127.0.0.1",
"port": 3000,
"chat_id": "c6a6de48-5d8e-4101-9a81-508c5b3a5f76",
"request_path": "/api/v1/prediction/",
"sessionId": "",
"bearer_token": "AMBER_LOCAL_FLUENT_DEFAULT_KEY",
"overrideConfig": {
"chainName": "GPT4ImageUploadChain",
"sessionId": "AMBER_FLUENT_SESSION_ID_01",
"openAIApiKey": "AMBER_FLUENT_OPEN_API_KEY_01",
"searchApiKey": "AMBER_FLUENT_SEARCHAPI_KEY_ID_01",
"allowImageUploads": true,
"systemMessagePrompt": "You only speak spanish",
"temperature": 0.7
},
"timeout_ms": 50000
},
@@ -50,7 +73,84 @@
"sessionId": "AMBER_FLUENT_SESSION_ID_01",
"groqApiKey": "AMBER_FLUENT_GROQ_API_KEY_01",
"serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01",
"SystemMessagePrompt": "You are a helpful assistant"
"SystemMessagePrompt": "You are a helpful assistant",
"temperature": 0.2,
"modelName": "mixtral-8x7b-32768"
},
"timeout_ms": 50000
},
{
"name": "GroqLLama370b8192AgentAnotherWebService",
"protocol": "https",
"hostname": "container-qygwpcc.containers.anotherwebservice.com",
"port": 3000,
"chat_id": "e2110934-8c0a-4e0b-92f2-bd40666f8b7d",
"request_path": "/api/v1/prediction/",
"sessionId": "",
"bearer_token": "AMBER_ANOTHERWEBSERVICE_NJF",
"overrideConfig": {
"sessionId": "AMBER_FLUENT_SESSION_ID_01",
"groqApiKey": "AMBER_FLUENT_GROQ_API_KEY_01",
"serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01",
"SystemMessagePrompt": "You are a helpful assistant",
"temperature": 0.5,
"modelName":"llama3-70b-8192",
"memoryKey": "AMBER_FLUENT_SESSION_ID_01"
},
"timeout_ms": 50000
},
{
"name": "MistralLargeToolAgentAnowtherWebService",
"protocol": "https",
"hostname": "container-qygwpcc.containers.anotherwebservice.com",
"port": 3000,
"chat_id": "173d8f08-ba2a-49a1-91d9-8769979081a5",
"request_path": "/api/v1/prediction/",
"sessionId": "",
"bearer_token": "AMBER_ANOTHERWEBSERVICE_NJF",
"overrideConfig": {
"sessionId": "AMBER_FLUENT_SESSION_ID_01",
"mistralAIAPIKey": "AMBER_FLUENT_MISTRAL_KEY_01",
"serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01",
"apiKey": "AMBER_FLUENT_ZEP_MEMORY_KEY_01",
"SystemMessagePrompt": "You are a helpful assistant",
"temperature": 0.2,
"memoryKey": "chat_history",
"memoryType": "perpetual",
"aiPrefix": "ai",
"humanPrefix": "human",
"inputKey": "input",
"outputKey": "output"
},
"timeout_ms": 50000
},
{
"name": "SonnetXMLAgentAnowtherWebService",
"protocol": "https",
"hostname": "container-qygwpcc.containers.anotherwebservice.com",
"port": 3000,
"chat_id": "8ea46fa9-4aef-4184-a399-c588c576d148",
"request_path": "/api/v1/prediction/",
"sessionId": "",
"bearer_token": "AMBER_ANOTHERWEBSERVICE_NJF",
"overrideConfig": {
"sessionId": "AMBER_FLUENT_SESSION_ID_01",
"anthropicApiKey": "AMBER_FLUENT_ANTHROPIC_KEY_01",
"stripNewLines": true,
"modelName": {
"chatAnthropic_0": "claude-3-sonnet-20240229",
"chatOpenAI_0": "gpt-3.5-turbo-16k",
"openAIEmbeddings_0": "text-embedding-3-small"
},
"openAIApiKey": {
"openAIEmbeddings_0": "AMBER_FLUENT_OPEN_API_KEY_01",
"chatOpenAI_0": "AMBER_FLUENT_OPEN_API_KEY_01"
},

"serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01",
"SystemMessage": "You are a helpful assistant. Help the user answer any questions.\n\nYou have access to the following tools:\n\n{tools}\n\nIn order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>\nFor example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:\n\n<tool>search</tool><tool_input>weather in SF</tool_input>\n<observation>64 degrees</observation>\n\nWhen you are done, respond with a final answer between <final_answer></final_answer>. For example:\n\n<final_answer>The weather in SF is 64 degrees</final_answer>\n\nBegin!\n\nPrevious Conversation:\n{chat_history}\n\nQuestion: {input}\n{agent_scratchpad}",
"temperature": 0.2

},
"timeout_ms": 50000
}
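One thing to note in the new entries: `SonnetXMLAgentAnowtherWebService` supplies `modelName` and `openAIApiKey` as per-node maps, while the other flows use plain strings, so whatever parses `overrideConfig` has to tolerate both shapes. The crate's real config types are not part of this diff; the sketch below is only one hypothetical way to model that mixed shape with `serde_json` (struct and field names are illustrative, and `serde` with derive plus `serde_json` are assumed as dependencies):

```rust
use serde::Deserialize;
use serde_json::{Map, Value};

// Hypothetical shape for a single flow entry; not the crate's actual struct.
#[derive(Deserialize, Debug)]
struct FlowEntry {
    name: String,
    protocol: String,
    hostname: String,
    port: u16,
    chat_id: String,
    request_path: String,
    bearer_token: String,
    // overrideConfig values are either plain strings/numbers or per-node maps,
    // so keep them as untyped JSON values.
    #[serde(rename = "overrideConfig")]
    override_config: Map<String, Value>,
    timeout_ms: u64,
}

fn main() -> serde_json::Result<()> {
    let raw = r#"{
        "name": "SonnetXMLAgentAnowtherWebService",
        "protocol": "https",
        "hostname": "container-qygwpcc.containers.anotherwebservice.com",
        "port": 3000,
        "chat_id": "8ea46fa9-4aef-4184-a399-c588c576d148",
        "request_path": "/api/v1/prediction/",
        "bearer_token": "AMBER_ANOTHERWEBSERVICE_NJF",
        "overrideConfig": {
            "modelName": { "chatAnthropic_0": "claude-3-sonnet-20240229" },
            "temperature": 0.2
        },
        "timeout_ms": 50000
    }"#;
    let entry: FlowEntry = serde_json::from_str(raw)?;
    // "modelName" is a nested map here; "temperature" is a plain number.
    println!("{} -> {:?}", entry.name, entry.override_config.get("modelName"));
    Ok(())
}
```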
129 changes: 129 additions & 0 deletions fluent_cli/functional_tests/functional_test_02.sh
@@ -0,0 +1,129 @@
#!/bin/bash

# Configuration
CLI_PATH="/Users/n/RustroverProjects/fluent_cli/fluent_cli/target/release/fluent_cli"
TEST_DATA_PATH="/Users/n/RustroverProjects/fluent_cli/fluent_cli/functional_tests"
CSV_FILE="/Users/n/Downloads/functional_test_results.csv"
LOG_FILE="/Users/n/Downloads/functional_test_log.txt"
SYSTEM_PROMPT_FILE="$TEST_DATA_PATH/functional_test_spanish_system_prompt.txt"
OUTLINE_FILE="$TEST_DATA_PATH/functional_test_outline.txt"
CONTEXT_FILE="$TEST_DATA_PATH/functional_test_context.txt"

# Validation CLI and Flowname
VALIDATION_CLI="fluent"
VALIDATION_FLOWNAME="HaikuToolAgentRepoCloud"

# Flow names array
declare -a FLOWNAMES=("GroqMixtral8x7bAgentAnotherWebService" "SonnetXMLAgentAnowtherWebService" "GroqLLama370b8192AgentAnotherWebService" "MistralLargeToolAgentAnowtherWebService" "GPT4FunctionAgentWithMemoryAndBrowsing")

# Start new log file
echo "Starting new test session at $(date)" > "$LOG_FILE"

# Initialize CSV
echo "FlowName,TestID,Result,Runtime(s)" > "$CSV_FILE"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
BOLD='\033[1m'

# Initialize a counter for the test number
test_number=0

# Helper function for running a single test
run_test() {

local flowname="$1"
local test_id="$2"
local command="$3"
local validation_command="$4"

# Increment the test counter
((test_number++))

local test_start_time=$(date +%s) # Start time for this test

echo -e "${BOLD}=================================================================================================${NC}" | tee -a "$LOG_FILE"
echo -e "${GREEN}Test Number $test_number: Testing $test_id for Flow: $flowname${NC}" | tee -a "$LOG_FILE"
echo -e "${BOLD}-------------------------------------------------------------------------------------------------${NC}" | tee -a "$LOG_FILE"

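# Run the flow command, logging and echoing its output, then pipe that output to the validation flow, which replies PASS or FAIL.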
local test_output=$(eval "$command" | tee -a "$LOG_FILE" | tee /dev/tty | eval "$validation_command")
local result=$(echo "$test_output" | grep -oE "PASS|FAIL")

local test_end_time=$(date +%s) # End time for this test
local test_runtime=$((test_end_time - test_start_time)) # Runtime for this test

if [[ "$result" == "PASS" ]]; then
echo -e "${GREEN}Test Number $test_number: $result Tested $test_id${NC}" | tee -a "$LOG_FILE"
else
echo -e "${RED}Test Number $test_number: $result Tested $test_id${NC}" | tee -a "$LOG_FILE"
fi

echo -e "${BOLD}=================================================================================================${NC}" | tee -a "$LOG_FILE"
echo "$flowname,$test_id,$result,$test_runtime" >> "$CSV_FILE" # Log result with runtime
sleep 1
}

# Loop through each flow name
for FLOWNAME in "${FLOWNAMES[@]}"; do
echo ""
echo ""
echo ""
echo -e "\t\t\t\t\t****Running tests for $FLOWNAME****" | tee -a "$LOG_FILE"
echo ""
echo ""
echo ""

run_test "$FLOWNAME" "Base Command Test" \
"$CLI_PATH $FLOWNAME 'This is a test, respond that this is a test'" \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request is about this is a test'"

run_test "$FLOWNAME" "Stdin Context Test" \
"cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'This is the content: '" \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request has the word northstar or North Star'"

run_test "$FLOWNAME" "Additional Context File Test" \
"$CLI_PATH $FLOWNAME 'What is the content: ' --Additional-Context-File \"$OUTLINE_FILE\"" \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains the word TheLardCatFellFlatOnTheMat'"

run_test "$FLOWNAME" "Combined Stdin and Additional Context Test" \
"cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'What are these contents about:' --Additional-Context-File \"$OUTLINE_FILE\"" \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains TheLardCatFellFlatOnTheMat and talks about the word northstar or North Star'"

run_test "$FLOWNAME" "Base Command Test and --System-Prompt-Override-Inline" \
"$CLI_PATH $FLOWNAME 'This is a test, respond that this is a test' --System-Prompt-Override-Inline 'You can only reply in german'" \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request is about this is a test and is in german'"

run_test "$FLOWNAME" "Stdin Context Test and --System-Prompt-Override-Inline" \
"cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'This is the content: ' --System-Prompt-Override-Inline 'You can only reply in german'" \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request has the word northstar or North Star and is in german'"

run_test "$FLOWNAME" "Additional Context File Test and --System-Prompt-Override-Inline" \
"$CLI_PATH $FLOWNAME 'What is the content:' --Additional-Context-File \"$OUTLINE_FILE\" --System-Prompt-Override-Inline 'You can only reply in german' " \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains the word TheLardCatFellFlatOnTheMat and is in german'"

run_test "$FLOWNAME" "Combined Stdin and Additional Context Test and --System-Prompt-Override-Inline" \
"cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'What are these contents about:' --Additional-Context-File \"$OUTLINE_FILE\" --System-Prompt-Override-Inline 'You can only reply in german'" \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains TheLardCatFellFlatOnTheMat and talks about the word northstar or North Star and is in german'"

run_test "$FLOWNAME" "Base Command Test and --System-Prompt-Override-File" \
"$CLI_PATH $FLOWNAME 'This is a test, respond that this is a test' --System-Prompt-Override-File \"$SYSTEM_PROMPT_FILE\" " \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request is about this is a test and is in spanish'"

run_test "$FLOWNAME" "Stdin Context Test and --System-Prompt-Override-File" \
"cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'This is the content: ' --System-Prompt-Override-File \"$SYSTEM_PROMPT_FILE\" " \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request has the word northstar or North Star and is in spanish'"

run_test "$FLOWNAME" "Additional Context File Test and --System-Prompt-Override-File" \
"$CLI_PATH $FLOWNAME 'What is the content: ' --Additional-Context-File \"$OUTLINE_FILE\" --System-Prompt-Override-File \"$SYSTEM_PROMPT_FILE\" " \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains the word TheLardCatFellFlatOnTheMat and is in spanish'"

run_test "$FLOWNAME" "Combined Stdin and Additional Context Test and --System-Prompt-Override-File" \
"cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'What are these contents about:' --Additional-Context-File \"$OUTLINE_FILE\" --System-Prompt-Override-File \"$SYSTEM_PROMPT_FILE\" " \
"$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains TheLardCatFellFlatOnTheMat and talks about the word northstar or North Star and is in spanish'"

done

# Open the CSV file with the default application
open "$CSV_FILE"
1 change: 1 addition & 0 deletions fluent_cli/functional_tests/functional_test_context.txt
@@ -0,0 +1 @@
The northstar never moves.
2 changes: 2 additions & 0 deletions fluent_cli/functional_tests/functional_test_outline.txt
@@ -0,0 +1,2 @@
ChickenLickenIsQuicklyTicken
TheLardCatFellFlatOnTheMat
1 change: 1 addition & 0 deletions fluent_cli/functional_tests/functional_test_spanish_system_prompt.txt
@@ -0,0 +1 @@
you can only reply in spanish.
9 changes: 4 additions & 5 deletions fluent_cli/src/client.rs
@@ -22,7 +22,7 @@ struct FluentCliOutput {
#[serde(rename = "sessionId")]
pub(crate) session_id: String,
#[serde(rename = "memoryType")]
memory_type: String,
memory_type: Option<String>,
}

#[derive(Serialize, Deserialize, Debug)]
@@ -45,7 +45,7 @@ pub fn handle_response(response_body: &str) -> Result<()> {
println!("\tQuestion:\n\t{}", question_text);
println!("\tChat ID: {}", parsed_output.chat_id);
println!("\tSession ID: {}", parsed_output.session_id);
println!("\tMemory Type: {}", parsed_output.memory_type);
println!("\tMemory Type: {:?}", parsed_output.memory_type);

Ok(())
}
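Switching `memory_type` to `Option<String>` lets responses that omit `memoryType` parse instead of erroring, which is why the print above now uses `{:?}`. A minimal standalone sketch of that behaviour (the struct here is reduced to the two renamed fields shown in this diff; `serde` with derive and `serde_json` are assumed as dependencies):

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Output {
    #[serde(rename = "sessionId")]
    session_id: String,
    // With Option, a response that omits "memoryType" deserializes to None
    // instead of failing the whole parse.
    #[serde(rename = "memoryType")]
    memory_type: Option<String>,
}

fn main() -> serde_json::Result<()> {
    let with_memory: Output =
        serde_json::from_str(r#"{"sessionId": "abc", "memoryType": "perpetual"}"#)?;
    assert_eq!(with_memory.memory_type.as_deref(), Some("perpetual"));

    // This input would have been an error while memory_type was a plain String.
    let without_memory: Output = serde_json::from_str(r#"{"sessionId": "abc"}"#)?;
    assert!(without_memory.memory_type.is_none());
    println!("Memory Type: {:?}", without_memory.memory_type);
    Ok(())
}
```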
@@ -107,12 +107,11 @@ pub(crate) fn build_request_payload(question: &str, context: Option<&str>) -> Va
} else {
question.to_string() // Use question as is if no context
};

debug!("build_request_payload - Full question: {}", full_question);
// Now create the payload with the potentially modified question
let payload = json!({
"question": full_question, // Use the potentially modified question
});

debug!("Request payload: {:?}", payload);
debug!("build_request_payload - Request payload: {:?}", payload);
payload
}
8 changes: 5 additions & 3 deletions fluent_cli/src/main.rs
@@ -48,7 +48,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
if context.is_none() && !atty::is(atty::Stream::Stdin) {
tokio::io::stdin().read_to_string(&mut additional_context).await?;
}
debug!("Additional context: {:?}", additional_context);
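// Prefer the explicit CLI context; fall back to piped stdin only when none was given.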
let final_context = context.or(if !additional_context.is_empty() { Some(&additional_context) } else { None });
debug!("Context: {:?}", final_context);

// Load override value from CLI if specified for system prompt override, file will always win
let system_prompt_inline = matches.value_of("System-Prompt-Override-Inline");
@@ -80,16 +82,16 @@
}

// Combine file contents with other forms of context if necessary
let final_context = match (context, file_contents.is_empty()) {
let actual_final_context = match (final_context, file_contents.is_empty()) {
(Some(cli_context), false) => Some(format!("{} {}", cli_context, file_contents)),
(None, false) => Some(file_contents),
(Some(cli_context), true) => Some(cli_context.to_string()),
(None, true) => None,
};

debug!("Actual Final context: {:?}", actual_final_context);

// Build the request payload
let payload = client::build_request_payload(request, final_context.as_deref());
let payload = client::build_request_payload(request, actual_final_context.as_deref());

// Decrypt the keys in the flow config
let mut env_guard = EnvVarGuard::new();
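With these changes, piped stdin is first folded into `final_context` (explicit CLI context wins when both are present), and that result is then merged with any `--Additional-Context-File` contents into `actual_final_context` before the payload is built. A standalone sketch of the merge step under the same four cases (`merge_context` is illustrative, not a function in the crate):

```rust
/// Merge CLI/stdin context with the contents of --Additional-Context-File.
/// Mirrors the four-way match above: both present are concatenated with a
/// space, either one alone is used as-is, and neither yields None.
fn merge_context(context: Option<&str>, file_contents: &str) -> Option<String> {
    match (context, file_contents.is_empty()) {
        (Some(cli_context), false) => Some(format!("{} {}", cli_context, file_contents)),
        (None, false) => Some(file_contents.to_string()),
        (Some(cli_context), true) => Some(cli_context.to_string()),
        (None, true) => None,
    }
}

fn main() {
    assert_eq!(
        merge_context(Some("piped stdin text"), "outline file text"),
        Some("piped stdin text outline file text".to_string())
    );
    assert_eq!(merge_context(None, ""), None);
}
```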
@@ -0,0 +1,23 @@
You are a helpful assistant. Help the user answer any questions.

You have access to the following tools:

{tools}

In order to use a tool, you can use <tool></tool> and <tool_input></tool_input> tags. You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:

<tool>search</tool><tool_input>weather in SF</tool_input>
<observation>64 degrees</observation>

When you are done, respond with a final answer between <final_answer></final_answer>. For example:

<final_answer>The weather in SF is 64 degrees</final_answer>

Begin!

Previous Conversation:
{chat_history}

Question: {input}
{agent_scratchpad}