diff --git a/amber.yaml b/amber.yaml
index 09e53d1..7d571d3 100644
--- a/amber.yaml
+++ b/amber.yaml
@@ -7,6 +7,9 @@ secrets:
 - name: AMBER_ANOTHERWEBSERVICE_NJF_02
   sha256: ca0d769067f0f73b870b74221d0da1313b776fe05a0c90f92d4342a6f7d719f2
   cipher: 02d8895644cccc86997912ae8637d6e26985e09c6bff08f88eecd90f5e692e2b1769bbae7f55ae29272fbc5a1994c191a28fbf5ea8543e9867925e156110a859531e3a420c197f085e8f2052f4aa77790a5ef3a31780022bf39940b5
+- name: AMBER_ASTRA_LANGFLOW_APP_TOKEN
+  sha256: 32a3b7f99eb6be0b0727a1cc8102dca99474a6f7f0eafd620c4cc7b79e3a4997
+  cipher: 5566ac15884bc38b1d6f81c2456bcf7ad9252eccedd03c17bc0f9a54bbd7d865430784d3c83da5ba49852b273bcf38f5c6388e95a819721a2521f80ea01302294ce805a2e3dabb0a9bddb3752e5fee285fdf535568205763fe95cc96dd5711da7abfe079ab6589bf1b4cb40a842d8ce65a49c44be3534296bbe061f522c7fcab12a2e70dbb4f5a904a036e30ffdebf631e
 - name: AMBER_FLUENT_AIRTABLE_PAT_01
   sha256: 2d84652a1a74e05de3dfdc0a8445e8ec822da2f189c1509eeba2a302a072c243
   cipher: 3fa55997cda2fbea79e81ad570d6ce14c5186fd3d401665ccb88ed2f62082e55ed82346ff7d85062a1b5001ea20c62f7f2e3d42f1377a33559216bc5f9e57fd6aa53c27ae6f44a81520f07d428d71f1947a0fb919febf4c0ab837afcb67e283e306a06518acaf35ba382c8f2ada07f879e9a98c160fa9df032af415eeff55bcc2f4e
diff --git a/fluent_cli/Cargo.toml b/fluent_cli/Cargo.toml
index 101da52..db2ec02 100644
--- a/fluent_cli/Cargo.toml
+++ b/fluent_cli/Cargo.toml
@@ -34,7 +34,6 @@ indicatif = "0.17.8"
 term_size = "1.0.0-beta1"
 crossterm="0.25.0"
 terminal_size = "0.3.0"
-wasm-bindgen-cli = "0.2"
 
 [dependencies.getrandom]
diff --git a/fluent_cli/config.json b/fluent_cli/config.json
index aec8445..e3b1295 100644
--- a/fluent_cli/config.json
+++ b/fluent_cli/config.json
@@ -18,6 +18,79 @@
     },
     "timeout_ms": 50000
   },
+  {
+    "name": "LocalGoogleFlashChain",
+    "engine": "flowise",
+    "protocol": "http",
+    "hostname": "127.0.0.1",
+    "port": 3000,
+    "chat_id": "fbaa82fb-6312-4bbd-a841-bdcf2a8c2bba",
+    "request_path": "/api/v1/prediction/",
+    "sessionId": "AMBER_FLUENT_SESSION_ID_01",
+    "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY",
+    "overrideConfig": {
+      "modelName": "gemini-1.5-flash-latest"
+    },
+    "tweaks": {
+
+    },
+    "timeout_ms": 50000
+  },
+
+  {
+    "name": "LocalGPT4SupervisorWorkerFlow",
+    "engine": "flowise",
+    "protocol": "https",
+    "hostname": "flowise.fluentcli.com",
+    "port": 443,
+    "chat_id": "57ead6df-627f-45c9-948a-9bc2b19e6a2e",
+    "request_path": "/api/v1/prediction/",
+    "sessionId": "AMBER_FLUENT_SESSION_ID_01",
+    "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY",
+    "overrideConfig": {
+      "stripNewLines": true,
+      "modelName": {
+        "chatOpenAICustom_0": "gpt-4o"
+      },
+      "openAIApiKey": {
+        "chatOpenAICustom_0": "AMBER_FLUENT_OPENAI_API_KEY_01"
+      },
+      "temperature": 0,
+      "systemMessage": "you are a helpful assistant",
+      "serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01"
+    },
+    "tweaks": {
+
+    },
+    "timeout_ms": 50000
+  },
+  {
+    "name": "GPT4SupervisorWorkerBrowserAndGitHubFlowRepoCloud",
+    "engine": "flowise",
+    "protocol": "https",
+    "hostname": "flowise.fluentcli.com",
+    "port": 443,
+    "chat_id": "d170e885-f0d6-42f1-b27f-8fe6c579ac64",
+    "request_path": "/api/v1/prediction/",
+    "sessionId": "AMBER_FLUENT_SESSION_ID_01",
+    "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY",
+    "overrideConfig": {
+      "stripNewLines": true,
+      "modelName": {
+        "chatOpenAICustom_0": "gpt-4o"
+      },
+      "openAIApiKey": {
+        "chatOpenAICustom_0": "AMBER_FLUENT_OPENAI_API_KEY_01"
+      },
+      "temperature": 0,
+      "systemMessage": "you are a helpful assistant",
+      "serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01"
+    },
+    "tweaks": {
+
+    },
"timeout_ms": 500000 + }, { "name": "StarCoder2HuggingFaceRepoCloud", @@ -116,6 +189,68 @@ }, "timeout_ms": 500000 }, + { + "name": "FluentCLIAstraRetrivalQAChainRepoCloud", + "engine": "flowise", + "protocol": "https", + "hostname": "9d81nz4o.rpcld.co", + "port": 443, + "chat_id": "e976497f-7fa6-40b0-a879-ed1b49ffd4bc", + "request_path": "/api/v1/prediction/", + "upsert_path": "/api/v1/vector/upsert/", + "sessionId": "", + "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY", + "overrideConfig": { + "returnSourceDocuments": true, + "chainName": "FluentCLIAstraRetrivalQAChainRepoCloud", + "sessionId": "AMBER_FLUENT_SESSION_ID_01", + "openAIApiKey": "AMBER_FLUENT_OPENAI_API_KEY_01", + "searchApiKey": "AMBER_FLUENT_SEARCHAPI_KEY_ID_01", + "accessToken": "AMBER_FLUENT_GITHUB_PAT_KEY_01", + + "repoLink": "https://github.com/njfio/fluent_cli/", + "branch": "main", + "recursive": true, + "maxRetries" : 3, + "language":{ + }, + "modelName": { + "chatOpenAI_0": "gpt-4o", + "openAIEmbeddings_0": "text-embedding-ada-002" + }, + + "applicationToken": "AMBER_ASTRA_LANGFLOW_APP_TOKEN", + "dbEndPoint": "https://ee12d7de-4703-467d-aaee-8cde05f10f47-us-east-2.apps.astra.datastax.com", + "astraNamespace": "fluent", + "astraCollection": "fluent_collection", + "vectorDimension": 1536, + "similarityMetric": "cosine", + "topK": 10, + "searchType": "similarity", + "fetchK": 20, + "lambda": 0.5, + "temperature": 0.1, + "maxTokens": 1500, + "topP": 0.9, + "frequencyPenalty": 0.5, + "presencePenalty": 0.6, + "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:", + "responsePrompt": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure\" and stop after that. Refuse to answer any question not about the info. Never break character.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure\". Don't try to make up an answer. 
diff --git a/fluent_cli/src/client.rs b/fluent_cli/src/client.rs
index ae71e2b..b2af224 100644
--- a/fluent_cli/src/client.rs
+++ b/fluent_cli/src/client.rs
@@ -12,19 +12,63 @@ use tokio::fs::File;
 use tokio::io::AsyncReadExt;
 
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize)]
 pub struct FluentCliOutput {
-    pub(crate) text: String,
-    pub(crate) question: String,
+    pub text: String,
+    pub question: Option<String>,
     #[serde(rename = "chatId")]
-    pub(crate) chat_id: String,
+    pub chat_id: Option<String>,
     #[serde(rename = "chatMessageId")]
-    chat_message_id: String,
+    pub chat_message_id: Option<String>,
     #[serde(rename = "sessionId")]
-    pub(crate) session_id: String,
+    pub session_id: Option<String>,
     #[serde(rename = "memoryType")]
-    memory_type: Option<String>,
+    pub memory_type: Option<String>,
+    #[serde(rename = "sourceDocuments")]
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub source_documents: Option<Vec<Option<SourceDocument>>>,
+    #[serde(rename = "agentReasoning")]
+    pub agent_reasoning: Option<Vec<AgentReasoning>>,
+}
+#[derive(Debug, Deserialize)]
+pub struct AgentReasoning {
+    #[serde(rename = "agentName")]
+    pub agent_name: String,
+    pub messages: Vec<String>,
+    pub next: Option<String>,
+    pub instructions: Option<String>,
+    #[serde(rename = "usedTools")]
+    pub used_tools: Option<Vec<Option<String>>>,
+    #[serde(rename = "sourceDocuments")]
+    pub source_documents: Option<Vec<Option<SourceDocument>>>,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct SourceDocument {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub page_content: Option<String>,
+    pub metadata: Option<Metadata>,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct Metadata {
+    pub source: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub repository: Option<String>, // Make repository optional
+    pub branch: String,
+    pub loc: Location,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct Location {
+    pub lines: Lines,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct Lines {
+    pub from: i32,
+    pub to: i32,
 }
 
 #[derive(Serialize, Deserialize, Debug)]
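The reworked response types above make nearly every field optional, so partial Flowise payloads — with or without agentReasoning or sourceDocuments — should deserialize without error. A minimal sketch of how such a payload maps onto FluentCliOutput; the JSON here is invented for illustration and the snippet assumes these types plus serde_json are in scope:

// Illustrative only: not part of the patch. Missing keys such as "question"
// or "sourceDocuments" simply become None because the fields are Option<_>.
fn example_parse() -> serde_json::Result<()> {
    let body = r#"{
        "text": "The crate builds with `cargo build --release`.",
        "chatId": "abc123",
        "sessionId": "session-01",
        "agentReasoning": [
            {
                "agentName": "Supervisor",
                "messages": ["Delegating to the worker agent."],
                "next": "Worker",
                "usedTools": ["github", null]
            }
        ]
    }"#;

    let parsed: FluentCliOutput = serde_json::from_str(body)?;
    assert_eq!(parsed.chat_id.as_deref(), Some("abc123"));
    assert!(parsed.question.is_none());
    if let Some(agents) = parsed.agent_reasoning {
        assert_eq!(agents[0].agent_name, "Supervisor");
    }
    Ok(())
}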
@@ -32,7 +76,6 @@ struct Question {
     question: String,
 }
 
-
 #[derive(Serialize, Deserialize)]
 struct RequestPayload {
     question: String,
@@ -58,11 +101,11 @@ struct ResponseOutput {
     chat_id: Option<String>,
     session_id: Option<String>,
     memory_type: Option<String>,
-    code_blocks: Option<Vec<String>>, // Only populated if `--parse-code-output` is present
-    pretty_text: Option<String>, // Only populated if `--parse-code-output` is not present
+    code_blocks: Option<Vec<String>>,
+    pretty_text: Option<String>,
+    source: Option<String>,
 }
 
-
 // New structure to handle LangFlow output
 #[derive(Serialize, Deserialize, Debug)]
 struct LangFlowOutput {
@@ -150,15 +193,14 @@ pub async fn handle_langflow_response(response_body: &str, matches: &clap::ArgMa
             }
 
             if !matches.get_one::<bool>("markdown-output").map_or(false, |&v| v) &&
-                !matches.get_one::<bool>("parse-code-output").map_or(false, |&v| v) &&
-                matches.get_one::<bool>("full-output").map_or(true, |&v| v) {
+               !matches.get_one::<bool>("parse-code-output").map_or(false, |&v| v) &&
+               matches.get_one::<bool>("full-output").map_or(true, |&v| v) {
                 println!("{}", response_body); // Output the full raw response
             }
 
             if !matches.get_one::<bool>("markdown-output").map_or(false, |&v| v) &&
                !matches.get_one::<bool>("parse-code-output").map_or(false, |&v| v) &&
                !matches.get_one::<bool>("full-output").map_or(false, |&v| v) {
-
                 println!("{}", response_text); // Default output
             }
         },
@@ -175,20 +217,48 @@ pub async fn handle_langflow_response(response_body: &str, matches: &clap::ArgMa
     Ok(())
 }
 
-
-
-
 pub async fn handle_response(response_body: &str, matches: &clap::ArgMatches) -> Result<()> {
     // Parse the response body, handle error properly here instead of unwrapping
     debug!("Response body: {}", response_body);
     let result = serde_json::from_str::<FluentCliOutput>(response_body);
     debug!("Result: {:?}", result);
 
+    // If there's an error parsing the JSON, print the error and the raw response body
     match result {
         Ok(parsed_output) => {
             // If parsing is successful, use the parsed data
-            debug!("Parsed Output:{:?}", parsed_output);
+            debug!("Parsed Output: {:?}", parsed_output);
+
+            // Print agent reasoning details if present
+            if let Some(agent_reasoning) = &parsed_output.agent_reasoning {
+                eprintln!("\nAgent Reasoning Details:");
+                for agent in agent_reasoning {
+                    eprintln!("Agent Name: {}", agent.agent_name);
+                    if !agent.messages.is_empty() {
+                        eprintln!("Messages:");
+                        for message in &agent.messages {
+                            eprintln!("- {}", message);
+                        }
+                    }
+                    if let Some(next) = &agent.next {
+                        eprintln!("Next Step: {}", next);
+                    }
+                    if let Some(instructions) = &agent.instructions {
+                        eprintln!("Instructions: {}", instructions);
+                    }
+                    if let Some(used_tools) = &agent.used_tools {
+                        if !used_tools.is_empty() {
+                            eprintln!("Used Tools:");
+                            for tool in used_tools {
+                                if let Some(tool_name) = tool {
+                                    eprintln!("- {}", tool_name);
+                                }
+                            }
+                        }
+                    }
+                    eprintln!("\n---\n");
+                }
+            }
 
             if let Some(directory) = matches.get_one::<String>("download-media").map(|s| s.as_str()) {
                 let urls = extract_urls(response_body); // Assume extract_urls can handle any text
@@ -198,8 +268,30 @@ pub async fn handle_response(response_body: &str, matches: &clap::ArgMatches) ->
             if matches.get_one::<bool>("markdown-output").map_or(true, |&v| v) &&
                !matches.get_one::<bool>("parse-code-output").map_or(false, |&v| v) &&
                !matches.get_one::<bool>("full-output").map_or(false, |&v| v) {
-                debug!("markdown");
                 pretty_format_markdown(&parsed_output.text);
+                if let Some(documents) = &parsed_output.source_documents {
+                    pretty_format_markdown("\n---\n");
pretty_format_markdown("\n\n# Source Documents\n"); + pretty_format_markdown("\n---\n"); + for doc_option in documents { + if let Some(doc) = doc_option { + let markdown_link = format!( + "[View Source]({}/blob/{}/{}#L{}-L{})", + doc.metadata.as_ref().unwrap().repository.as_ref().unwrap_or(&"".to_string()), + doc.metadata.as_ref().unwrap().branch, + doc.metadata.as_ref().unwrap().source, + doc.metadata.as_ref().unwrap().loc.lines.from, + doc.metadata.as_ref().unwrap().loc.lines.to + ); + pretty_format_markdown(&markdown_link); + match &doc.page_content { + Some(content) if !content.is_empty() => pretty_format_markdown(&format!("**Page Content:**\n{}", content)), + _ => pretty_format_markdown("**Page Content:**\nNo content available"), + } + } + } + pretty_format_markdown("---\n"); + } } if !matches.get_one::("markdown-output").map_or(false, |&v| v) && @@ -216,22 +308,22 @@ pub async fn handle_response(response_body: &str, matches: &clap::ArgMatches) -> !matches.get_one::("parse-code-output").map_or(false, |&v| v) && matches.get_one::("full-output").map_or(true, |&v| v) { debug!("full output"); - println!("{}", response_body); // Output the text used, whether parsed or raw + println!("{}", response_body); } if !matches.get_one::("markdown-output").map_or(false, |&v| v) && !matches.get_one::("parse-code-output").map_or(false, |&v| v) && !matches.get_one::("full-output").map_or(false, |&v| v) { debug!("default"); - println!("{}", &parsed_output.text); // Output the text used, whether parsed or raw, but only if the --markdown-output flag is not set").text; + println!("{}", &parsed_output.text); } - }, - + }, Err(e) => { // If there's an error parsing the JSON, print the error and the raw response body + eprintln!("Failed to parse JSON: {}", e); if let Some(cause) = e.source() { - eprintln!("{:?}", cause); + eprintln!("Cause: {:?}", cause); } if let Some(directory) = matches.get_one::("download-media").map(|s| s.as_str()) { let urls = extract_urls(response_body); // Assume extract_urls can handle any text @@ -240,15 +332,12 @@ pub async fn handle_response(response_body: &str, matches: &clap::ArgMatches) -> } debug!("Download Response body: {}", response_body); println!("{}", response_body); - eprint!("\n\n"); - response_body.to_string(); } }; Ok(()) } - fn extract_urls(text: &str) -> Vec { let url_regex = Regex::new(r"https?://[^\s]+").unwrap(); url_regex.find_iter(text) @@ -256,7 +345,6 @@ fn extract_urls(text: &str) -> Vec { .collect() } - pub fn print_full_width_bar(string: &str) -> String { let width = terminal_size::terminal_size().map(|(terminal_size::Width(w), _)| w as usize).unwrap_or(80); string.repeat(width).dark_yellow().to_string() @@ -269,20 +357,15 @@ fn pretty_format_markdown(markdown_content: &str) { skin.bold.set_fg(crossterm::style::Color::Yellow); skin.italic.set_fg(crossterm::style::Color::Blue); skin.headers[0].set_fg(crossterm::style::Color::Yellow); - // skin.headers[0].set_bg(crossterm::style::Color::Black); skin.headers[1].set_fg(crossterm::style::Color::Green); - //skin.headers[1].set_bg(crossterm::style::Color::Black); skin.headers[2].set_fg(crossterm::style::Color::Blue); - //skin.inline_code.set_bg(crossterm::style::Color::Black); skin.inline_code.set_fg(crossterm::style::Color::White); - //skin.code_block.set_bg(crossterm::style::Color::Black); skin.code_block.set_fg(crossterm::style::Color::White); - //skin.set_bg(crossterm::style::Color::Black); skin.paragraph.left_margin = 4; skin.paragraph.right_margin = 4; - skin.print_text(markdown_content); + 
+    skin.print_text(markdown_content);
 }
 
 fn extract_code_blocks(markdown_content: &str) -> Vec<String> {
@@ -294,15 +377,9 @@ fn extract_code_blocks(markdown_content: &str) -> Vec<String> {
     .collect()
 }
 
-
 use tokio::io::AsyncWriteExt;
 use chrono::Local;
 
-
-// Correct definition of the function returning a Result with a boxed dynamic error
-
-
-
 async fn download_media(urls: Vec<String>, directory: &str) {
     let client = reqwest::Client::new();
@@ -344,10 +421,8 @@ async fn download_media(urls: Vec<String>, directory: &str) {
     }
 }
 
-
 // Change the signature to accept a simple string for `question`
-
-pub async fn send_request(flow: &FlowConfig, payload: &Value) -> reqwest::Result<String> {
+pub async fn send_request(flow: &FlowConfig, payload: &Value) -> reqwest::Result<String> {
     let client = Client::new();
 
     // Dynamically fetch the bearer token from environment variables if it starts with "AMBER_"
@@ -364,7 +439,6 @@ pub async fn send_request(flow: &FlowConfig, payload: &Value) -> reqwest::Resul
     replace_with_env_var(&mut override_config);
     debug!("Override config after update: {:?}", override_config);
 
-
     let url = format!("{}://{}:{}{}{}", flow.protocol, flow.hostname, flow.port, flow.request_path, flow.chat_id);
     debug!("URL: {}", url);
     debug!("Body: {}", payload);
@@ -383,15 +457,12 @@ pub async fn send_request(flow: &FlowConfig, payload: &Value) -> reqwest::Resul
     response.text().await
 }
 
-
-
 use std::error::Error as StdError; // Import the StdError trait for `source` method
 
 fn to_serde_json_error<E: ToString>(err: E) -> serde_json::Error {
     serde_json::Error::custom(err.to_string())
 }
 
-
 pub async fn upsert_with_json(api_url: &str, flow: &FlowConfig, payload: serde_json::Value) -> Result<()> {
     let bearer_token = if flow.bearer_token.starts_with("AMBER_") {
         env::var(&flow.bearer_token[6..]).unwrap_or_else(|_| flow.bearer_token.clone())
@@ -472,7 +543,6 @@ pub async fn upload_files(api_url: &str, file_paths: Vec<&str>) -> Result<()> {
     Ok(())
 }
 
-
 use tokio::fs::File as TokioFile; // Alias to avoid confusion with std::fs::File
 use tokio::io::{AsyncReadExt as TokioAsyncReadExt, Result as IoResult};
 
@@ -504,8 +574,6 @@ pub async fn prepare_payload(flow: &FlowConfig, question: &str, file_path: Optio
         |ctx| format!("\n{}\n{}\n", question, ctx)
     );
 
-
-
     debug!("Engine: {}", flow.engine);
     let mut body = match flow.engine.as_str() {
         "flowise" => {
@@ -522,7 +590,6 @@ pub async fn prepare_payload(flow: &FlowConfig, question: &str, file_path: Optio
                 system_prompt_inline.map(|s| s.to_string())
             };
 
-
             // Update the configuration with the override if it exists
             // Update the configuration based on what's present in the overrideConfig
             let mut flow_clone = flow.clone();
@@ -537,7 +604,6 @@ pub async fn prepare_payload(flow: &FlowConfig, question: &str, file_path: Optio
                 }
             }
 
-
             debug!("Flowise Engine");
             serde_json::json!({
                 "question": full_question,