diff --git a/amber.yaml b/amber.yaml index 5f9c25d..972cc5c 100644 --- a/amber.yaml +++ b/amber.yaml @@ -7,18 +7,30 @@ secrets: - name: AMBER_ANOTHERWEBSERVICE_NJF_02 sha256: ca0d769067f0f73b870b74221d0da1313b776fe05a0c90f92d4342a6f7d719f2 cipher: 02d8895644cccc86997912ae8637d6e26985e09c6bff08f88eecd90f5e692e2b1769bbae7f55ae29272fbc5a1994c191a28fbf5ea8543e9867925e156110a859531e3a420c197f085e8f2052f4aa77790a5ef3a31780022bf39940b5 +- name: AMBER_FLUENT_AIRTABLE_PAT_01 + sha256: 2d84652a1a74e05de3dfdc0a8445e8ec822da2f189c1509eeba2a302a072c243 + cipher: 3fa55997cda2fbea79e81ad570d6ce14c5186fd3d401665ccb88ed2f62082e55ed82346ff7d85062a1b5001ea20c62f7f2e3d42f1377a33559216bc5f9e57fd6aa53c27ae6f44a81520f07d428d71f1947a0fb919febf4c0ab837afcb67e283e306a06518acaf35ba382c8f2ada07f879e9a98c160fa9df032af415eeff55bcc2f4e - name: AMBER_FLUENT_ANTHROPIC_KEY_01 sha256: 8f66c5f92b94d9228e9d32b30a29f3848fcd4ad9f4ef1ec4d153559be015c3e9 cipher: da52538627a1a409458d749f43bf74138fd9aa9b0a8dc9e441ef15035aaae84f4b111cfa894ee9462231f90401177db64dc72c2c7dd8c38f79c36e61a10f592a4fcb1b973220c35854370ec57fdc62886d87398480cdd01aa75b7913712633147628606c9ad832b8b2e837937b7f6e70fd4e1615a23b3d72e5a26d2838c6e03728430032a4b8cbe16a3be07a98136cc191d3a56709a2b23efc76e066 - name: AMBER_FLUENT_GEMINI_API_KEY_01 sha256: dcc3fc4b33e620c9d1c1fb81b1c21ec36eaa0a3d71137949e17120b996decbb7 cipher: 0e05274547adb1cb064de584a1e69190edec52b22bf1059c8bd83d02146ed0732b7bcb3d9b01481d1b17432b66b6fc594f617aeecc5e4cb2a699539a6db7713f19da92356846b47b7324c7de089d550be4110a304de482 +- name: AMBER_FLUENT_GITBOOK_PAT_01 + sha256: f4bd8dd160e99ad258b3ca32942ff0800726020c99eba67b9a0e5bfc06bedc20 + cipher: 317def672561a6f565fc89b9a5f10ba8f300157a2df4719aca98a41350700e0122c071244e4be34dd14274af0a80f904f562550401f6f75d9ec6e189ef78632f8bf92612f6dfc020c8269df290d369c148cba4a804175427d700a3dad92d59 - name: AMBER_FLUENT_GITHUB_PAT_KEY_01 sha256: ba3ab7e0594b847ce15f159c0c81137bf04228c61dbc030b023deae7ce0faefc cipher: 
8f3834349b01542ab105c97f4b42bbabbf4d42c93c56b3975c29ddcf04d6e45b6331110cedacba97cd185d9581d27ed1cf06cd04c7fc8246cc3fd8e66ab2a083d85dba17391499e0bc064c8f57338a8ba3e983bcb1c436d5f2bc5f28d698e1e51dadb7805402cd86d1d90accb0a47488540ab184dc081a1350876505fa774ba2b79e87bfa5ca4241ea07126a2d - name: AMBER_FLUENT_GROQ_API_KEY_01 sha256: 89f7aeec36590e47a2eab221b2381dd70343ae36b13e8e17a9f6dc62f1cb80be cipher: 6ff9c99f465f1354cc8a107d5cbbd1165be08dd1c0a77b67bbb22dd4866dcb2a2bb48622edeb0bfcc1f3b100b1a83871e6ea90dde44ee9b4909641af7bfd69790b45518884bb686cc22045441ba48d668d87b3893869bc25bf1c21777d2c132322349f387b8730b3 +- name: AMBER_FLUENT_HUGGINGFACE_API_KEY_01 + sha256: 6c9a4cc12e51d530fec68981c00d983d2068a5ea3a561745ae7d5d857671d282 + cipher: e92b7772dc3c2fd7879c430bc824ea2925a2a8008bf9100907c60ca4f9a33c4d584a5fab96ddd9faeaf0e4e7de201b54f8d8c0709bc3bf016cc68f75f043b13415a87640b68e873add33e14e8c7d822db1e55d2ff8 +- name: AMBER_FLUENT_LANGFLOW_API_KEY_01 + sha256: 63b3269b87aa1acb168b593027409e0d20ca4648d053854bdbdcdf41b599e643 + cipher: d7b7dee8f381472764ab2b568ee1f660eb28e839f254ab417d3dc3a33d31206bf24f22f0b0fc58cb51e4137de95ab253c6c8191594ed799d8c70d8f6f27c82e5c511e7ce6186b7b74b0bc067731ef0db - name: AMBER_FLUENT_LANGSMITH_KEY_01 sha256: ee941a5c4b1e1d8f66f9aff29f528367b43decebfd302ba733e14e4db14758ba cipher: ce521a396babf8658dc8b7378317b86f50de481e1b18e2838109ed53c5e2aa15f9702c313253fc20f71cbc84f3a1f04106786b120106921e09f2df78cd01dc3703f9a9408c66342762ec3409c57d04cd40a60613 diff --git a/fluent_cli/.cargo/config.toml b/fluent_cli/.cargo/config.toml new file mode 100644 index 0000000..1054f8f --- /dev/null +++ b/fluent_cli/.cargo/config.toml @@ -0,0 +1,5 @@ +[target.x86_64-pc-windows-gnu] +linker = "x86_64-w64-mingw32-gcc" + +[target.x86_64-unknown-linux-gnu] +linker = "x86_64-linux-gnu-gcc" diff --git a/fluent_cli/Cargo.toml b/fluent_cli/Cargo.toml index bd2b50e..046ffde 100644 --- a/fluent_cli/Cargo.toml +++ b/fluent_cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fluent" 
-version = "0.3.0" +version = "0.3.5" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -20,11 +20,16 @@ base64 = "0.21.7" infer = "0.8.0" pulldown-cmark = "0.9.0" regex = "1.10.4" -termimad = "0.14" +termimad = "0.29.2" serde_yaml = "0.9.34+deprecated" chrono = "0.4.38" anyhow = "1.0.68" colored = "2.1.0" tokio-util = "0.7.10" thiserror = "1.0.58" -mime_guess = "2.0.4" \ No newline at end of file +mime_guess = "2.0.4" +cross = "0.2.1" +indicatif = "0.17.8" +term_size = "1.0.0-beta1" +crossterm="0.25.0" +terminal_size = "0.3.0" diff --git a/fluent_cli/Cross.toml b/fluent_cli/Cross.toml new file mode 100644 index 0000000..0747854 --- /dev/null +++ b/fluent_cli/Cross.toml @@ -0,0 +1,7 @@ +[target.x86_64-unknown-linux-gnu] +image = "sciolence/cross-rs-custom-x86_64-unknown-linux-gnu:latest" +[target.x86_64-pc-windows-msvc] +image = "idhyt/cross-rs-x86_64-pc-windows-msvc:0.2.5" +[target.x86_64h-apple-darwin] +image = "idhyt/cross-rs-x86_64-apple-darwin:0.0.1" + diff --git a/fluent_cli/config.json b/fluent_cli/config.json index 788b0bf..62a9317 100644 --- a/fluent_cli/config.json +++ b/fluent_cli/config.json @@ -1,7 +1,45 @@ [ + { + "name": "LocalGoogleGeminiChain", + "engine": "flowise", + "protocol": "http", + "hostname": "127.0.0.1", + "port": 3000, + "chat_id": "fbaa82fb-6312-4bbd-a841-bdcf2a8c2bba", + "request_path": "/api/v1/prediction/", + "sessionId": "AMBER_FLUENT_SESSION_ID_01", + "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY", + "overrideConfig": { + + }, + "tweaks": { + + }, + "timeout_ms": 50000 + }, + + { + "name": "StarCoder2HuggingFaceRepoCloud", + "engine": "flowise", + "protocol": "https", + "hostname": "9d81nz4o.rpcld.co", + "port": 443, + "chat_id": "9b252777-f26f-4841-9dc2-fb9db02990e1", + "request_path": "/api/v1/prediction/", + "sessionId": "AMBER_FLUENT_SESSION_ID_01", + "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY", + "overrideConfig": { + "huggingFaceApiKey": 
"AMBER_FLUENT_HUGGINGFACE_API_KEY_01" + }, + "tweaks": { + + }, + "timeout_ms": 50000 + }, { "name": "FlowiseConversationalRetrivalQAChainRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -19,8 +57,8 @@ "pineconeApiKey": "AMBER_FLUENT_PINECONE_API_KEY_01", "repoLink": "https://github.com/FlowiseAI/Flowise/", "branch": "main", - "chunkOverlap": 300, - "chunkSize": 1000, + "chunkOverlap": 500, + "chunkSize": 3000, "recursive": true, "maxRetries" : 3, "language":{ @@ -28,14 +66,19 @@ }, "modelName": { "chatOpenAI_0": "gpt-4-turbo-preview", - "openAIEmbeddings_0": "text-embedding-3-small" + "openAIEmbeddings_0": "text-embedding-3-large" }, + "pineconeIndex": "large", "returnSourceDocuments": true + }, + "tweaks": { + }, "timeout_ms": 500000 }, { "name": "FluentCLIConversationalRetrivalQAChainRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -50,6 +93,8 @@ "openAIApiKey": "AMBER_FLUENT_OPENAI_API_KEY_01", "searchApiKey": "AMBER_FLUENT_SEARCHAPI_KEY_ID_01", "accessToken": "AMBER_FLUENT_GITHUB_PAT_KEY_01", + "pineconeApiKey": "AMBER_FLUENT_PINECONE_API_KEY_01", + "repoLink": "https://github.com/njfio/fluent_cli/", "branch": "main", "chunkOverlap": 500, @@ -65,12 +110,87 @@ "openAIEmbeddings_0": "text-embedding-3-large" }, "returnSourceDocuments": true + }, + "tweaks": { + }, "timeout_ms": 500000 }, + { + "name": "LangFlowExample", + "engine": "langflow", + "protocol": "https", + "hostname": "njfio-langflow-preview.hf.space", + "port": 443, + "request_path": "/api/v1/run/", + "chat_id": "0bb3d0a9-87b0-4023-850b-62c4003e5558", + "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY", + "input_value_key": "input_value", + "sessionId": "", + "output_type": "chat", + "input_type": "chat", + "overrideConfig": { + }, + + "tweaks": { + "Prompt-dqVZJ": { + "template": "you are a helpful assistant" + }, + "OpenAIModel-CIM9V": { + "openai_api_key": 
"AMBER_FLUENT_OPENAI_API_KEY_01", + "model_name": "gpt-4-turbo-preview" + }, + "ChatOutput-wTcXM": { + "session_id": "AMBER_FLUENT_SESSION_ID_01" + }, + "ChatInput-zRWPE": { + "session_id": "AMBER_FLUENT_SESSION_ID_01" + } + }, + "timeout_ms": 50000 + }, + + { + "name": "LangFlowBlogWriterExample", + "engine": "langflow", + "protocol": "https", + "hostname": "njfio-langflow-preview.hf.space", + "port": 443, + "request_path": "/api/v1/run/", + "chat_id": "c33be023-0326-49db-87b4-e52edab31006", + "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY", + "input_value_key": "input_value", + "sessionId": "", + "output_type": "chat", + "input_type": "chat", + "overrideConfig": { + + }, + "tweaks": { + "Prompt-SPWIv": {}, + "URL-3ptZT": { + "urls": [ + "https://github.com/njfio/fluent_cli" + ] + }, + "OpenAIModel-E1sw1": { + "openai_api_key": "AMBER_FLUENT_OPENAI_API_KEY_01", + "model_name": "gpt-4-turbo-preview" + }, + "URL-iMcRn": { + "urls": [ + "https://fluentcli.com" + ] + }, + "ChatInput-b5wyA": { + } + }, + "timeout_ms": 50000 + }, { "name": "GPTChainRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -92,11 +212,15 @@ }, "systemMessagePrompt": "you are a helpful assistant", "serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01" + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "GPT4ImageUploadRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -109,11 +233,15 @@ "openAIApiKey": "AMBER_FLUENT_OPENAI_API_KEY_01", "allowImageUploads": true, "temperature": 0.7 + }, + "tweaks": { + }, "timeout_ms": 500000 }, { "name": "GPT4ToolAgentWithTextFileUpsertRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -122,6 +250,34 @@ "upsert_path": "/api/v1/vector/upsert/", "sessionId": "AMBER_FLUENT_SESSION_ID_01", "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY", + "overrideConfig": { + "chainName": 
"GPT4ToolAgentWithTextFileUpsertRepoCloud", + "chunkOverlap": 500, + "chunkSize": 3000, + "openAIApiKey": "AMBER_FLUENT_OPENAI_API_KEY_01", + "modelName": { + "chatOpenAI_0": "gpt-4-turbo-preview", + "openAIEmbeddings_0": "text-embedding-3-small" + }, + "name": "local_retriever", + "description": "local retriever" + }, + "tweaks": { + + }, + "timeout_ms": 500000 + }, + { + "name": "GPT4ToolAgentWithCSVFileUpsert", + "engine": "flowise", + "protocol": "https", + "hostname": "9d81nz4o.rpcld.co", + "port": 443, + "chat_id": "48b416ed-a5df-4cc2-accd-fa7fc5126992", + "request_path": "/api/v1/prediction/", + "upsert_path": "/api/v1/vector/upsert/", + "sessionId": "AMBER_FLUENT_SESSION_ID_01", + "bearer_token": "AMBER_REPO_CLOUD_FLUENT_DEMO_KEY", "overrideConfig": { "chainName": "GPT4ToolAgentWithTextFileUpsertRepoCloud", "pineconeApiKey": "AMBER_FLUENT_PINECONE_API_KEY_01", @@ -135,11 +291,15 @@ "name": "local_retriever", "description": "local retriever", "pineconeIndex": "textfiles" + }, + "tweaks": { + }, "timeout_ms": 500000 }, { "name": "GPT4ToolAgentWithUpsertRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -174,11 +334,15 @@ "name": "fluentcli_retriever", "description": "fluentcli source code retriever", "returnSourceDocuments": true + }, + "tweaks": { + }, "timeout_ms": 500000 }, { "name": "GPT4FunctionAgentWithMemoryAndBrowsingRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -196,12 +360,16 @@ "openAIToolAgent_0": { "systemMessage": "You are a helpful assistant" } + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "PerplexitySonarMediumOnlineChainRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -219,11 +387,15 @@ "openAIToolAgent_0": { "systemMessage": "You are a helpful assistant" } + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "PerplexitySonarSmallOnlineChainRepoCloud", + 
"engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -241,12 +413,16 @@ "openAIToolAgent_0": { "systemMessage": "You are a helpful assistant" } + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "MistralLargeToolAgentRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -260,12 +436,16 @@ "serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01", "maxIterations": 10, "systemMessage": "You are a helpful assistant" + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "GroqMixtral8x7bAgentRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -281,11 +461,15 @@ "systemMessagePrompt": "You are a helpful assistant", "temperature": 0.8, "modelName": "mixtral-8x7b-32768" + }, + "tweaks": { + }, "timeout_ms": 500000 }, { "name": "GroqLLama370b8192AgentRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -302,11 +486,15 @@ "temperature": 0.8, "modelName":"llama3-70b-8192", "memoryKey": "AMBER_FLUENT_SESSION_ID_01" + }, + "tweaks": { + }, "timeout_ms": 500000 }, { "name": "GroqGemma7bAgentRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -323,12 +511,16 @@ "temperature": 0.5, "modelName":"gemma-7b-it", "memoryKey": "AMBER_FLUENT_SESSION_ID_01" + }, + "tweaks": { + }, "timeout_ms": 500000 }, { "name": "SonnetXMLAgentRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -353,11 +545,15 @@ "SystemMessage": "You are a helpful assistant. Help the user answer any questions.\n\nYou have access to the following tools:\n\n{tools}\n\nIn order to use a tool, you can use and tags. 
You will then get back a response in the form \nFor example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:\n\nsearchweather in SF\n64 degrees\n\nWhen you are done, respond with a final answer between . For example:\n\nThe weather in SF is 64 degrees\n\nBegin!\n\nPrevious Conversation:\n{chat_history}\n\nQuestion: {input}\n{agent_scratchpad}", "temperature": 0.2 + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "SonnetToolAgentWithSearchAndWebRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -378,11 +574,15 @@ "chatOpenAI_0": "AMBER_FLUENT_OPENAI_API_KEY_01" }, "serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01" + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "SonnetChain", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -404,12 +604,16 @@ }, "systemMessagePrompt": "you are a helpful assistant", "serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01" + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "HaikuXMLAgentRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -434,11 +638,15 @@ "SystemMessage": "You are a helpful assistant. Help the user answer any questions.\n\nYou have access to the following tools:\n\n{tools}\n\nIn order to use a tool, you can use and tags. You will then get back a response in the form \nFor example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:\n\nsearchweather in SF\n64 degrees\n\nWhen you are done, respond with a final answer between . 
For example:\n\nThe weather in SF is 64 degrees\n\nBegin!\n\nPrevious Conversation:\n{chat_history}\n\nQuestion: {input}\n{agent_scratchpad}", "temperature": 0.8 + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "HaikuChain", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -460,12 +668,16 @@ }, "systemMessagePrompt": "you are a helpful assistant", "serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01" + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "OpusXMLAgentRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -491,10 +703,14 @@ "SystemMessage": "You are a helpful assistant. Help the user answer any questions.\n\nYou have access to the following tools:\n\n{tools}\n\nIn order to use a tool, you can use and tags. You will then get back a response in the form \nFor example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:\n\nsearchweather in SF\n64 degrees\n\nWhen you are done, respond with a final answer between . For example:\n\nThe weather in SF is 64 degrees\n\nBegin!\n\nPrevious Conversation:\n{chat_history}\n\nQuestion: {input}\n{agent_scratchpad}", "temperature": 0.8 }, + "tweaks": { + + }, "timeout_ms": 50000 }, { "name": "OpusToolAgentWithSearchAndWebRepoCloud", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -518,11 +734,15 @@ "SystemMessage": "You are a helpful assistant. Help the user answer any questions.\n\nYou have access to the following tools:\n\n{tools}\n\nIn order to use a tool, you can use and tags. You will then get back a response in the form \nFor example, if you have a tool called 'search' that could run a google search, in order to search for the weather in SF you would respond:\n\nsearchweather in SF\n64 degrees\n\nWhen you are done, respond with a final answer between . 
For example:\n\nThe weather in SF is 64 degrees\n\nBegin!\n\nPrevious Conversation:\n{chat_history}\n\nQuestion: {input}\n{agent_scratchpad}", "temperature": 0.2 + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "OpusChain", + "engine": "flowise", "protocol": "https", "hostname": "9d81nz4o.rpcld.co", "port": 443, @@ -544,12 +764,16 @@ }, "systemMessagePrompt": "you are a helpful assistant", "serpApiKey": "AMBER_FLUENT_SERPAPI_KEY_01" + }, + "tweaks": { + }, "timeout_ms": 50000 }, { "name": "MakeLeonardoImagePost", + "engine": "webhook", "protocol": "https", "hostname": "hook.us1.make.com", "port": 443, @@ -566,11 +790,15 @@ "presetStyle": "CINEMATIC", "makeAuthentication": "AMBER_MAKE_LEONARDO_IMAGE_POST", "seed": "" + }, + "tweaks": { + }, "timeout_ms": 5000000 }, { "name": "MakeLeonardoImagePostTest", + "engine": "flowise", "protocol": "https", "hostname": "hook.us1.make.com", "port": 443, @@ -587,11 +815,15 @@ "presetStyle": "CINEMATIC", "makeAuthentication": "AMBER_MAKE_LEONARDO_IMAGE_POST", "seed": "" + }, + "tweaks": { + }, "timeout_ms": 5000000 }, { "name": "MakeShopifyAndGhostPostExample", + "engine": "flowise", "protocol": "https", "hostname": "hook.us1.make.com", "port": 443, @@ -608,6 +840,9 @@ "presetStyle": "CINEMATIC", "makeAuthentication": "AMBER_MAKE_SHOPIFY_GHOST_POST_KEY", "seed": "" + }, + "tweaks": { + }, "timeout_ms": 5000000 } diff --git a/fluent_cli/fluent_cli_autocomplete.sh b/fluent_cli/fluent_cli_autocomplete.sh index 6de0170..28fd298 100644 --- a/fluent_cli/fluent_cli_autocomplete.sh +++ b/fluent_cli/fluent_cli_autocomplete.sh @@ -1,9 +1,51 @@ - # Assuming FLUENT_CLI_CONFIG_PATH points to a JSON file containing configuration autocomplete_flows() { - local current_word="${COMP_WORDS[COMP_CWORD]}" - local flow_names=$(jq -r '.[].name' "$FLUENT_CLI_CONFIG_PATH") - COMPREPLY=($(compgen -W "${flow_names}" -- "$current_word")) + local i cur prev opts cmd + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + 
prev="${COMP_WORDS[COMP_CWORD-1]}" + cmd="" + opts="" + + # Define command options + opts="-o -f -a -u -g -p -z -m -d -n -l -w -h -V --system-prompt-override-inline --system-prompt-override-file --additional-context-file --upload-file-path --generate-autocomplete --parse-code-output --full-output --markdown-output --download-media --upsert-no-upload --upsert-with-upload --webhook --help --version [flowname] [request] [context]" + + for i in "${COMP_WORDS[@]}"; do + case "${cmd},${i}" in + ",$1") + cmd="fluent" + ;; + *) + ;; + esac + done + + case "${cmd}" in + fluent) + # Add flow name autocomplete from JSON file + if [[ ${cur} == *[a-zA-Z]* && ${prev} == "fluent" ]]; then + local flow_names=$(jq -r '.[].name' "$FLUENT_CLI_CONFIG_PATH") + COMPREPLY=($(compgen -W "${flow_names}" -- "${cur}")) + return 0 + fi + + if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]]; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + + case "${prev}" in + --system-prompt-override-inline|--system-prompt-override-file|--additional-context-file|--upload-file-path|--download-media|--upsert-no-upload|--upsert-with-upload) + COMPREPLY=($(compgen -f -- "${cur}")) + return 0 + ;; + esac + ;; + esac } -complete -F autocomplete_flows fluent +if [[ "${BASH_VERSINFO[0]}" -eq 4 && "${BASH_VERSINFO[1]}" -ge 4 || "${BASH_VERSINFO[0]}" -gt 4 ]]; then + complete -F autocomplete_flows -o nosort -o bashdefault -o default fluent +else + complete -F autocomplete_flows -o bashdefault -o default fluent +fi diff --git a/fluent_cli/functional_tests/functional_test_02.sh b/fluent_cli/functional_tests/functional_test_02.sh index fba9482..0fcbdb1 100755 --- a/fluent_cli/functional_tests/functional_test_02.sh +++ b/fluent_cli/functional_tests/functional_test_02.sh @@ -88,40 +88,40 @@ for FLOWNAME in "${FLOWNAMES[@]}"; do "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains the word TheLardCatFellFlatOnTheMat'" run_test "$FLOWNAME" "Combined Stdin and Additional 
Context Test" \ - "cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\"" \ + "cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\"" \ "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains TheLardCatFellFlatOnTheMat and talks about the word northstar or North Star'" run_test "$FLOWNAME" "Base Command Test and --system-prompt-override-inline" \ - "$CLI_PATH $FLOWNAME 'This is a test, respond that this is a test' --system-prompt-override-inline 'You can only reply in german'" \ - "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request is about this is a test and is in german'" + "$CLI_PATH $FLOWNAME 'This is a test, respond that this is a test' --system-prompt-override-inline 'You can only reply in German'" \ + "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request is about this is a test and is in German'" run_test "$FLOWNAME" "Stdin Context Test and --system-prompt-override-inline" \ - "cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'Repeat what I provide for context' --system-prompt-override-inline 'You can only reply in german' " \ - "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request has the word northstar or North Star and is in german'" + "cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'Repeat what I provide for context' --system-prompt-override-inline 'You can only reply in German' " \ + "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request has the word northstar or North Star and is in German'" run_test "$FLOWNAME" "Additional Context File Test and --system-prompt-override-inline" \ - "$CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\" --system-prompt-override-inline 'You can only
reply in german' " \ - "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains the word TheLardCatFellFlatOnTheMat and is in german'" + "$CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\" --system-prompt-override-inline 'You can only reply in German' " \ + "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains the word TheLardCatFellFlatOnTheMat and is in German'" run_test "$FLOWNAME" "Combined Stdin and Additional Context Test and --system-prompt-override-inline" \ - "cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\" --system-prompt-override-inline 'You can only reply in german'" \ - "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains TheLardCatFellFlatOnTheMat and talks about the word northstar or North Star and is in german'" + "cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\" --system-prompt-override-inline 'You can only reply in German'" \ + "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains TheLardCatFellFlatOnTheMat and talks about the word northstar or North Star and is in German'" run_test "$FLOWNAME" "Base Command Test and --system-prompt-override-file" \ "$CLI_PATH $FLOWNAME 'This is a test, respond that this is a test' --system-prompt-override-file \"$SYSTEM_PROMPT_FILE\" " \ - "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request is about this is a test and is in spanish'" + "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request is about this is a test and is in Spanish'" run_test "$FLOWNAME" "Stdin Context Test and --system-prompt-override-file" \ "cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'Repeat what I 
provide for context:' --system-prompt-override-file \"$SYSTEM_PROMPT_FILE\" " \ - "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request has the word northstar or North Star and is in spanish'" + "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request has the word northstar or North Star and is in Spanish'" run_test "$FLOWNAME" "Additional Context File Test and --system-prompt-override-file" \ - "$CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\" --system-prompt-override-file \"$SYSTEM_PROMPT_FILE\" " \ - "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains the word TheLardCatFellFlatOnTheMat and is in spanish'" + "$CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\" --system-prompt-override-file \"$SYSTEM_PROMPT_FILE\" " \ + "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains the word TheLardCatFellFlatOnTheMat and is in Spanish'" run_test "$FLOWNAME" "Combined Stdin and Additional Context Test and --system-prompt-override-file" \ - "cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\" --system-prompt-override-file \"$SYSTEM_PROMPT_FILE\" " \ - "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains TheLardCatFellFlatOnTheMat and talks about the word northstar or North Star and is in spanish'" + "cat \"$CONTEXT_FILE\" | $CLI_PATH $FLOWNAME 'Repeat what I provide for context, and for the outline' --additional-context-file \"$OUTLINE_FILE\" --system-prompt-override-file \"$SYSTEM_PROMPT_FILE\" " \ + "$VALIDATION_CLI $VALIDATION_FLOWNAME 'Answer PASS or FAIL only if the request contains TheLardCatFellFlatOnTheMat and talks about the word northstar or North Star and is in Spanish'" done diff 
--git a/fluent_cli/shell_scripts/llama_generated_long_form_blog_creator.sh b/fluent_cli/shell_scripts/llama_generated_long_form_blog_creator.sh index 50c0f44..7f1815e 100755 --- a/fluent_cli/shell_scripts/llama_generated_long_form_blog_creator.sh +++ b/fluent_cli/shell_scripts/llama_generated_long_form_blog_creator.sh @@ -33,23 +33,30 @@ call_fluent() { local input_request="$2" local output_file="$3" - echo "$input_request" | fluent "$flow_name" ' ' > "$output_file" 2>/dev/null + echo "$input_request" | fluent "$flow_name" ' ' > "$output_file" if [ $? -ne 0 ]; then echo "Failed to process $flow_name" exit 1 fi - echo "$flow_name output has been saved to $output_file" + echo -e "$flow_name output has been saved to $output_file\n" } verify_step() { local file="$1" local step="$2" - cat "$file" + + echo -e "Review the content of $file for the $step step:\n" + echo -e "\t\t---------------------------------------------------------------------------------\n\n" + mdcat "$file" --ansi # 'plain' removes git integration and headers + echo -e "\n" + echo -e "\n" + echo -e "\t\t---------------------------------------------------------------------------------\n" read -p "Is this $step correct? (yes/no) " approval if [ "$approval" != "yes" ]; then - echo "$step approval failed. Exiting." + echo "Error: $step approval failed. Exiting." exit 1 fi + echo "$step approved." } # Define file paths @@ -64,24 +71,24 @@ call_fluent "$FLOW_STORY_ARC" "In paragraph form create a story arc about the re verify_step "$STORY_ARC_FILE" "story arc" # Generate character map -cat "$STORY_ARC_FILE" | fluent "$FLOW_CHARACTER_MAP" "Generate a complete creative character map for this story arc context" > "$CHARACTER_MAP_FILE" -echo "Character map generated" +cat "$STORY_ARC_FILE" | fluent "$FLOW_CHARACTER_MAP" "Generate a complete creative character map for this story arc context. Output just the character map." 
> "$CHARACTER_MAP_FILE" verify_step "$CHARACTER_MAP_FILE" "character map" # Generate outline -cat "$CHARACTER_MAP_FILE" "$STORY_ARC_FILE" | fluent "$FLOW_OUTLINE" "Create an extensive creative detailed outline for this story and provided context" > "$OUTLINE_FILE" -echo "Outline generated" +cat "$CHARACTER_MAP_FILE" "$STORY_ARC_FILE" | fluent "$FLOW_OUTLINE" "Create an extensive creative detailed outline for this story and provided context. Output just the outline." > "$OUTLINE_FILE" verify_step "$OUTLINE_FILE" "outline" # Generate prompts -cat "$CHARACTER_MAP_FILE" "$STORY_ARC_FILE" "$OUTLINE_FILE" | fluent "$FLOW_PROMPTS" "Generate at 20-40 prompts that will tell this story eloquently with creativity, originality, and incredible suspension of disbelief. Output just the prompts sequentially without numbering them" > "$PROMPTS_FILE" +cat "$CHARACTER_MAP_FILE" "$STORY_ARC_FILE" "$OUTLINE_FILE" | fluent "$FLOW_PROMPTS" "Generate 20-40 prompts that will tell this story eloquently with creativity, originality, and incredible suspension of disbelief. Output just the prompts sequentially without numbering them" > "$PROMPTS_FILE" if [ ! -s "$PROMPTS_FILE" ]; then echo "No prompts were generated or there was an error. Exiting." exit 1 fi +verify_step "$PROMPTS_FILE" "prompts" + # Ask for final approval to build the complete story read -p "Generate the complete blog post based on the approved items? (yes/no) " final_approval if [ "$final_approval" != "yes" ]; then @@ -93,17 +100,15 @@ fi : > "$FINAL_POST_FILE" # Clear existing content if any while IFS= read -r prompt; do if [[ -n "$prompt" && "$prompt" =~ [^[:space:]] ]]; then - echo "Processing prompt: $prompt" { cat "$CHARACTER_MAP_FILE" "$STORY_ARC_FILE" tail -n 10 "$FINAL_POST_FILE" - echo "Prompt: $prompt" } | fluent "$FLOW_POST_SECTION" "Generating section based on context prompt. No Yapping. Do not offer unnatural introductions or lead ins. Do not summarize. Just write the section. 
You have access to the previous 15 lines in the context. Transition eloquently. Never lead-in or start 'Here is my attempt...' or similar. Eloquent transitions only. Use Markdown" >> "$FINAL_POST_FILE" { cat $CHARACTER_MAP_FILE tail -n 10 "$FINAL_POST_FILE" - } | fluent GPT4FunctionAgentWithMemoryAndBrowsingRepoCloud "Create a prompt that will be used to create an image for this context. The image should be in abstract art style. Keep consistent with the provided character map and you align the theme with the provided last 15 lines in the context" | fluent MakeLeonardoImagePost "" -d /Users/n/Downloads/ >> "$FINAL_POST_FILE" + } | fluent HaikuChain "Create a prompt that will be used to create an image for this context. The image should be in abstract art style. Keep consistent with the provided character map and you align the theme with the provided last 15 lines in the context" | fluent MakeLeonardoImagePost "" -d /Users/n/Downloads/ >> "$FINAL_POST_FILE" else echo "Skipped empty or whitespace-only prompt." 
fi diff --git a/fluent_cli/shell_scripts/prompted_long_form_creator.conf b/fluent_cli/shell_scripts/prompted_long_form_creator.conf index 9d77f52..954d642 100644 --- a/fluent_cli/shell_scripts/prompted_long_form_creator.conf +++ b/fluent_cli/shell_scripts/prompted_long_form_creator.conf @@ -6,8 +6,8 @@ PROMPTS_PATH="/Users/n/Downloads/prompts.txt" FINAL_POST_PATH="/Users/n/Downloads/complete_blog_post.txt" # Flow names for each operation -FLOW_OUTLINE="GroqLLama370b8192AgentRepoCloud" +FLOW_OUTLINE="LocalGoogleGeminiChain" FLOW_STORY_ARC="SonnetChain" -FLOW_CHARACTER_MAP="GroqLLama370b8192AgentRepoCloud" -FLOW_PROMPTS="SonnetChain" -FLOW_POST_SECTION="OpusChain" +FLOW_CHARACTER_MAP="SonnetChain" +FLOW_PROMPTS="GPTChainRepoCloud" +FLOW_POST_SECTION="GroqLLama370b8192AgentRepoCloud" diff --git a/fluent_cli/src/client.rs b/fluent_cli/src/client.rs index d364869..856e21c 100644 --- a/fluent_cli/src/client.rs +++ b/fluent_cli/src/client.rs @@ -1,19 +1,19 @@ use log::{debug, error}; use std::env; -use reqwest::{Client, multipart}; -use reqwest::header::{HeaderMap, HeaderValue, AUTHORIZATION}; +use reqwest::{Client}; + use serde_json::{json, Value}; -use std::time::Duration; + use crate::config::{FlowConfig, replace_with_env_var}; use serde::{Deserialize, Serialize}; use serde_json::Result; use tokio::fs::File; -use tokio::io; + use tokio::io::AsyncReadExt; -use crate::client; -use serde_yaml::to_string as to_yaml; // Add serde_yaml to your Cargo.toml if not already included + + // Add serde_yaml to your Cargo.toml if not already included #[derive(Serialize, Deserialize, Debug)] @@ -28,6 +28,7 @@ struct FluentCliOutput { pub(crate) session_id: String, #[serde(rename = "memoryType")] memory_type: Option, + } #[derive(Serialize, Deserialize, Debug)] @@ -62,6 +63,131 @@ struct ResponseOutput { pretty_text: Option, // Only populated if `--parse-code-output` is not present } + +// New structure to handle LangFlow output +#[derive(Serialize, Deserialize, Debug)] +struct 
LangFlowOutput { + pub(crate) session_id: String, + pub(crate) outputs: Vec, +} + +#[derive(Serialize, Deserialize, Debug)] +struct LangFlowOutputDetail { + pub(crate) inputs: LangFlowInput, + pub(crate) outputs: Vec, +} + +#[derive(Serialize, Deserialize, Debug)] +struct LangFlowInput { + pub(crate) input_value: String, +} + +#[derive(Serialize, Deserialize, Debug)] +struct LangFlowResultDetail { + pub(crate) results: LangFlowResult, + pub(crate) artifacts: Option, + pub(crate) messages: Vec, + #[serde(rename = "component_display_name")] + pub(crate) component_display_name: String, + #[serde(rename = "component_id")] + pub(crate) component_id: String, +} + +#[derive(Serialize, Deserialize, Debug)] +struct LangFlowResult { + pub(crate) result: String, +} + +#[derive(Serialize, Deserialize, Debug)] +struct LangFlowArtifacts { + pub(crate) message: String, + pub(crate) sender: String, + #[serde(rename = "sender_name")] + pub(crate) sender_name: String, +} + +#[derive(Serialize, Deserialize, Debug)] +struct LangFlowMessage { + pub(crate) message: String, + pub(crate) sender: String, + #[serde(rename = "sender_name")] + pub(crate) sender_name: String, + #[serde(rename = "component_id")] + pub(crate) component_id: String, +} + +// Example function to demonstrate using these structures +pub async fn handle_langflow_response(response_body: &str, matches: &clap::ArgMatches) -> Result<()> { + debug!("LangFlow response body: {}", response_body); + let result = serde_json::from_str::(response_body); + debug!("Parsed LangFlow result: {:?}", result); + + match result { + Ok(lang_flow_output) => { + // Concatenate all results and messages + let response_text = lang_flow_output.outputs.iter() + .flat_map(|output| output.outputs.iter().map(|detail| detail.results.result.clone())) + .collect::>() + .join("\n"); + + if let Some(directory) = matches.value_of("download-media") { + let urls = extract_urls(&response_text); // Adjust the URL extraction as needed + download_media(urls, 
directory).await; + } + + if matches.is_present("markdown-output") { + pretty_format_markdown(&response_text); + } else if matches.is_present("parse-code-output") { + let code_blocks = extract_code_blocks(&response_text); + for block in code_blocks { + println!("{}", block); + } + } else if matches.is_present("full-output") { + println!("{}", response_body); // Output the full raw response + } else { + println!("{}", response_text); // Default output + } + }, + Err(e) => { + eprintln!("Error parsing LangFlow response: {:?}", e); + if let Some(directory) = matches.value_of("download-media") { + let urls = extract_urls(response_body); // Fallback to raw response + download_media(urls, directory).await; + } + println!("{}", response_body); // Print raw response if there is a parsing error + } + } + + Ok(()) +} + +use terminal_size::{Width, terminal_size}; + +pub fn generate_tick_strings() -> Vec { + if let Some((Width(w), _)) = terminal_size() { + let total_width = w as usize; + let bar_length = total_width / 3; // Calculate 33% of the terminal width + let mut ticks = vec![]; + let tick_symbol = "⫸"; + let mut current_length = 0; + + while current_length < bar_length { + let tick = tick_symbol.repeat(current_length / 2 + 1); + ticks.push(tick); + current_length += 2; + } + + // Add reverse order to complete the cycle + let mut reverse_ticks = ticks.clone(); + reverse_ticks.reverse(); + ticks.extend(reverse_ticks); + ticks + } else { + vec!["".to_string(); 20] // Default fallback if terminal size cannot be determined + } +} + + use serde_json::Error as SerdeError; pub async fn handle_response(response_body: &str, matches: &clap::ArgMatches) -> Result<()> { @@ -70,7 +196,7 @@ pub async fn handle_response(response_body: &str, matches: &clap::ArgMatches) -> let result = serde_json::from_str::(response_body); debug!("Result: {:?}", result); // If there's an error parsing the JSON, print the error and the raw response body - let response_text = match result { + let 
_response_text = match result { Ok(parsed_output) => { // If parsing is successful, use the parsed data debug!("{:?}", parsed_output); @@ -79,8 +205,7 @@ pub async fn handle_response(response_body: &str, matches: &clap::ArgMatches) -> download_media(urls, directory).await; } if matches.is_present("markdown-output") { - pretty_format_markdown(&parsed_output.text); // Ensure text is obtained correctly - + pretty_format_markdown(&parsed_output.text); // Output the text used, whether parsed or raw, but only if the --markdown-output flag is not } else if matches.is_present("parse-code-output") { let code_blocks = extract_code_blocks(&parsed_output.text); for block in code_blocks { @@ -104,6 +229,7 @@ pub async fn handle_response(response_body: &str, matches: &clap::ArgMatches) -> } debug!("Download Response body: {}", response_body); println!("{}", response_body); + eprint!("\n\n"); response_body.to_string(); } }; @@ -119,9 +245,47 @@ fn extract_urls(text: &str) -> Vec { .collect() } +use term_size; +pub fn print_full_width_bar(string: &str) -> String { + let buffer = 1; + let width = terminal_size::terminal_size().map(|(terminal_size::Width(w), _)| w as usize).unwrap_or(80); + string.repeat(width).dark_yellow().to_string() +} + +pub fn print_nearly_full_width_bar() -> String { + let total_width = terminal_size::terminal_size() + .map(|(terminal_size::Width(w), _)| w as usize) + .unwrap_or(80); // Default to 80 if terminal size can't be determined + + let bar_length = total_width * 85 / 100; // Calculate 90% of the terminal width for the bar length + let padding = (total_width - bar_length) / 2; // Calculate padding to center the bar + + let bar = "-".repeat(bar_length).yellow().to_string(); + format!("{:padding$}{}{:padding$}", "", bar, "", padding = padding) +} +use termimad::*; + fn pretty_format_markdown(markdown_content: &str) { - let skin = MadSkin::default(); // Assuming `termimad` is used - skin.print_text(markdown_content); // Render to a string + let mut skin 
= MadSkin::default(); // Assuming `termimad` is used + skin.bold.set_fg(crossterm::style::Color::Yellow); + skin.italic.set_fg(crossterm::style::Color::Blue); + skin.headers[0].set_fg(crossterm::style::Color::Yellow); + // skin.headers[0].set_bg(crossterm::style::Color::Black); + skin.headers[1].set_fg(crossterm::style::Color::Green); + //skin.headers[1].set_bg(crossterm::style::Color::Black); + skin.headers[2].set_fg(crossterm::style::Color::Blue); + //skin.inline_code.set_bg(crossterm::style::Color::Black); + skin.inline_code.set_fg(crossterm::style::Color::White); + //skin.code_block.set_bg(crossterm::style::Color::Black); + skin.code_block.set_fg(crossterm::style::Color::White); + //skin.set_bg(crossterm::style::Color::Black); + + skin.paragraph.left_margin = 4;; + skin.paragraph.right_margin = 4; + + let formatted_text = skin.print_text(markdown_content); // skin.display_markdown(&format!("\n{}\n", markdown_content))); // + // skin.term_text(&format!("\n{}\n", markdown_content))); // + formatted_text } fn extract_code_blocks(markdown_content: &str) -> Vec { @@ -141,7 +305,7 @@ pub fn parse_fluent_cli_output(json_data: &str) -> Result { use reqwest; use tokio::io::AsyncWriteExt; use chrono::Local; -use anyhow::{Context}; + // Correct definition of the function returning a Result with a boxed dynamic error @@ -167,7 +331,7 @@ async fn download_media(urls: Vec, directory: &str) { format!("download-{}.dat", Local::now().format("%Y%m%d%H%M%S")) }; let filepath = Path::new(directory).join(filename); - + debug!("Downloading: {}\nto: {}\n", clean_url, filepath.display()); match File::create(&filepath).await { Ok(mut file) => { if let Err(e) = file.write_all(&content).await { @@ -246,11 +410,11 @@ pub async fn process_webhook_payload(flow: &FlowConfig, request: &str, file_cont let url = format!("{}://{}:{}{}{}", flow.protocol, flow.hostname, flow.port, flow.request_path, flow.chat_id); - let mut request_builder = client.post(&url); + let request_builder = 
client.post(&url); let mut form = Form::new(); - let mut file_paths_clone = file_path.clone(); + let file_paths_clone = file_path.clone(); for file_path_item in file_paths_clone.iter() { let path = Path::new(file_path_item); @@ -413,7 +577,7 @@ pub async fn upsert_with_json(api_url: &str, flow: &FlowConfig, payload: serde_j pub async fn upload_files(api_url: &str, file_paths: Vec<&str>) -> Result<()> { let client = Client::new(); let mut form = Form::new(); - let mut file_paths_clone = file_paths.clone(); + let file_paths_clone = file_paths.clone(); for file_path in file_paths { let path = Path::new(file_path); @@ -463,36 +627,62 @@ pub async fn upload_files(api_url: &str, file_paths: Vec<&str>) -> Result<()> { use tokio::fs::File as TokioFile; // Alias to avoid confusion with std::fs::File use tokio::io::{AsyncReadExt as TokioAsyncReadExt, Result as IoResult}; use base64::encode; -use std::collections::HashMap; -use std::io::ErrorKind; + + use std::path::Path; use clap::ArgMatches; -use pulldown_cmark::{Event, Parser, Tag}; + + + use regex::Regex; use reqwest::multipart::{Form, Part}; use serde::de::Error; -use termimad::{FmtText, MadSkin}; -use termimad::minimad::once_cell::sync::Lazy; -use tokio_util::codec::{BytesCodec, FramedRead}; +use termimad::{MadSkin}; +use termimad::crossterm::style::Stylize; pub(crate) async fn prepare_payload(flow: &FlowConfig, question: &str, file_path: Option<&str>, actual_final_context: Option, cli_args: &ArgMatches, file_contents: &str, ) -> IoResult { let mut override_config = flow.override_config.clone(); + let mut tweaks_config = flow.tweaks.clone(); + debug!("Override config before update: {:?}", override_config); + debug!("Tweaks config before update: {:?}", tweaks_config); + replace_with_env_var(&mut tweaks_config); replace_with_env_var(&mut override_config); // Update config with env variables debug!("Override config after update: {:?}", override_config); + debug!("Tweaks config after update: {:?}", tweaks_config); let 
full_question = actual_final_context.as_ref().map_or_else( || question.to_string(), |ctx| format!("\n{}\n{}\n", question, ctx) ); - let mut body = json!({ - "question": full_question, - "overrideConfig": override_config, - }); + let mut body = match flow.engine.as_str() { + "flowise" | "webhook" => { + serde_json::json!({ + "question": full_question, + "overrideConfig": override_config, + }) + }, + "langflow" => { + let mut tweaks_config = flow.tweaks.clone(); + replace_with_env_var(&mut tweaks_config); + serde_json::json!({ + "question": full_question, + "tweaks": tweaks_config, + }) + }, + _ => { + serde_json::json!({ + "question": full_question, + "overrideConfig": override_config, + }) + } + }; + + if cli_args.is_present("upload-image-path") && file_path.is_some() { let path = file_path.unwrap(); @@ -509,7 +699,7 @@ pub(crate) async fn prepare_payload(flow: &FlowConfig, question: &str, file_path body.as_object_mut().unwrap().insert("uploads".to_string(), uploads); } - if cli_args.is_present("webhook") { + if flow.engine == "webhook" { let webhook_details = json!({ "question": question.to_string(), "context": actual_final_context.unwrap_or_default(), diff --git a/fluent_cli/src/config.rs b/fluent_cli/src/config.rs index 4151cd8..3818bdf 100644 --- a/fluent_cli/src/config.rs +++ b/fluent_cli/src/config.rs @@ -1,5 +1,5 @@ use serde::{Deserialize, Serialize}; -use serde_json::{Value, json}; +use serde_json::{Value}; use std::fs::File; use std::io::Read; use std::env; @@ -21,10 +21,12 @@ pub struct FlowConfig { pub bearer_token: String, #[serde(rename = "overrideConfig")] pub override_config: Value, + pub tweaks: Value, pub timeout_ms: Option, pub protocol: String, pub webhook_url: Option, pub webhook_headers: Option, + pub engine: String, } @@ -86,7 +88,13 @@ impl EnvVarGuard { // Assume overrideConfig might also have Amber encrypted keys self.decrypt_amber_keys_in_value(&mut flow.override_config)?; debug!("Decrypted keys: {:?}", self.keys); + + 
self.decrypt_amber_keys_in_value(&mut flow.tweaks)?; + debug!("Decrypted keys: {:?}", self.keys); + Ok(()) + + } fn decrypt_amber_keys_in_value(&mut self, value: &mut Value) -> Result<(), Box> { @@ -111,7 +119,7 @@ impl EnvVarGuard { Ok(()) } - fn set_env_var_from_amber(&mut self, env_key: &str, amber_key: &str) -> Result<(), Box> { + fn set_env_var_from_amber(&mut self, _env_key: &str, amber_key: &str) -> Result<(), Box> { let output = Command::new("amber") .args(&["print"]) .output()?; diff --git a/fluent_cli/src/main.rs b/fluent_cli/src/main.rs index 6e2628d..e297674 100644 --- a/fluent_cli/src/main.rs +++ b/fluent_cli/src/main.rs @@ -1,44 +1,57 @@ mod config; mod client; -use ::config::Value; -use clap::{App, Arg, Command}; + +use clap::{Arg, Command}; use tokio; -use log::{info, warn, error, debug}; +use log::{debug}; use env_logger; use tokio::fs::File; -use tokio::io::{self, AsyncReadExt}; -use crate::client::handle_response; +use tokio::io::{AsyncReadExt}; +use crate::client::{generate_tick_strings, handle_response, print_full_width_bar}; use crate::config::{EnvVarGuard, generate_bash_autocomplete_script, replace_with_env_var}; -use anyhow::Result; + use colored::*; // Import the colored crate -use colored::control::*; -fn print_status(flowname: &str, request: &str, new_question: &str) { - eprintln!( - "{}{}Fluent:\t\t{}\nRequest:\t{}\nContext:\n{}\n{}{}", - "⫸⫸⫸⫸⫸ ".bright_yellow().bold(), - "\n".normal(), +use serde::de::Error as SerdeError; + +use indicatif::{ProgressBar, ProgressStyle}; +use std::time::Duration; + +use colored::*; // Ensure the colored crate is included + // Ensure you've added `term_size` to your Cargo.toml + + + +fn print_status(spinner: &ProgressBar, flowname: &str, request: &str, new_question: &str) { + spinner.set_message(format!( + "\n{}\t{}\n{}\t{}\n{}\n{}\n", + + "Flow: ".purple().italic(), flowname.bright_blue().bold(), + "Request:".purple().italic(), request.bright_blue().italic(), + "Context:".purple().italic(), 
new_question.bright_green(), - "⫸⫸⫸⫸⫸ ".bright_yellow().bold(), - "\n".normal() - ); + + )); } +use anyhow::{Context, Result}; +use crossterm::style::Stylize; +use tokio::time::Instant; // use env_logger; // Uncomment this when you are using it to initialize logs #[tokio::main] -async fn main() -> Result<(), Box> { +async fn main() -> Result<()> { env_logger::init(); colored::control::set_override(true); - let mut configs = config::load_config()?; - let mut configs_clone = configs.clone(); + let mut configs = config::load_config().unwrap(); + let configs_clone = configs.clone(); let matches = Command::new("Fluent") .version("0.3.0") @@ -139,7 +152,7 @@ async fn main() -> Result<(), Box> { } let flowname = matches.value_of("flowname").unwrap(); - let flow = configs.iter_mut().find(|f| f.name == flowname).expect("Flow not found"); + let flow = configs.iter_mut().find(|f| f.name == flowname).context("Flow not found")?; let flow_clone = flow.clone(); let flow_clone2 = flow.clone(); let flow_clone3 = flow.clone(); @@ -185,7 +198,7 @@ async fn main() -> Result<(), Box> { let file_path = matches.value_of("upload-image-path"); - let file_path_clone = matches.value_of("upload-image-path").clone(); + let _file_path_clone = matches.value_of("upload-image-path").clone(); // Determine the final context from various sources @@ -205,7 +218,7 @@ async fn main() -> Result<(), Box> { (Some(cli_context), true) => Some(cli_context.to_string()), (None, true) => None, }; - let actual_final_context_clone = actual_final_context.clone(); + let _actual_final_context_clone = actual_final_context.clone(); let actual_final_context_clone2 = actual_final_context.clone(); debug!("Actual Final context: {:?}", actual_final_context); @@ -217,7 +230,7 @@ async fn main() -> Result<(), Box> { // Decrypt the keys in the flow config let mut env_guard = EnvVarGuard::new(); - let env_guard_result = env_guard.decrypt_amber_keys_for_flow(flow)?; + let env_guard_result = 
env_guard.decrypt_amber_keys_for_flow(flow).unwrap(); debug!("EnvGuard result: {:?}", env_guard_result); // Within the main function after parsing command-line arguments @@ -232,7 +245,7 @@ async fn main() -> Result<(), Box> { debug!("API URL: {}", api_url); // Call the upload function in the client module if let Err(e) = client::upload_files(&api_url, file_paths).await { - eprintln!("Error uploading files: {}", e); + eprintln!("Error uploading files: {}", e.to_string()); } } @@ -265,15 +278,80 @@ async fn main() -> Result<(), Box> { // Use the merged override config as the payload if let Err(e) = client::upsert_with_json(&api_url, &flow_clone3, serde_json::json!({"overrideConfig": override_config})).await { - eprintln!("Error during JSON upsert: {}", e); + eprintln!("Error during JSON upsert: {}", e.to_string()); } } - print_status(flowname, request, actual_final_context_clone2.as_ref().unwrap_or(&new_question).as_str()); + let spinner = ProgressBar::new_spinner(); + let tick_strings = generate_tick_strings(); + spinner.set_style(ProgressStyle::default_spinner() + .tick_strings(&[ + "", + "⫸ ", + "⫸⫸ ", + "⫸⫸⫸ ", + "⫸⫸⫸⫸ ", + "💛⫸⫸⫸⫸ ", + "⫸💛⫸⫸⫸ ", + "⫸⫸💛⫸⫸ ", + "⫸⫸⫸💛⫸ ", + "⫸⫸⫸⫸💛 ", + "⫸⫸⫸💛⫷ ", + "⫸⫸💛⫷⫷ ", + "⫸💛⫷⫷⫷ ", + "💛⫷⫷⫷⫷ ", + "⫷⫷⫷⫷ ", + "⫷⫷⫷ ", + "⫷⫷ ", + "⫷ ", + " ", + + ]) + .template("{spinner:.yellow}{msg}{spinner:.yellow}") + .expect("Failed to set progress style")); + spinner.enable_steady_tick(Duration::from_millis(300)); + + + let engine_type = &flow.engine; + let start_time = Instant::now(); + + print_status(&spinner, flowname, request, actual_final_context_clone2.as_ref().unwrap_or(&new_question).as_str()); + spinner.tick(); + debug!("Preparing Payload"); let payload = crate::client::prepare_payload(&flow, request, file_path, actual_final_context_clone2, &cli_args, &file_contents_clone ).await?; let response = crate::client::send_request(&flow, &payload).await?; debug!("Handling Response"); - handle_response(response.as_str(), &matches).await?; + + 
let duration = start_time.elapsed(); + spinner.finish_with_message(format!( + "\n{}\n\n\t{} {}\n\t{} {}\n\t{} {}\n\n{}\n", + client::print_full_width_bar("■"), + "Flow: ".grey().italic(), + flowname.purple().italic(), + "Request: ".grey().italic(), + request.bright_blue().italic(), + "Duration: ".grey().italic(), + format!("{:.4}s", duration.as_secs_f32()).green().italic(), // Apply bright yellow color to duration + client::print_full_width_bar("-") + )); + + //spinner.finish_and_clear(); + match engine_type.as_str() { + "flowise" | "webhook" => { + // Handle Flowise output + handle_response(response.as_str(), &matches).await + }, + "langflow" => { + // Handle LangFlow output + client::handle_langflow_response(response.as_str(), &matches).await + }, + _ => Ok({ + // Handle default outputser); + serde_json::Error::custom("Unsupported engine type"); + }) + }.expect("TODO: panic message"); + eprint!("\n\n{}\n\n", print_full_width_bar("■")); + Ok(()) }