From f147dbfd61ec6deac95e98d392c389b928854b81 Mon Sep 17 00:00:00 2001
From: Rbb666
Date: Sun, 2 Feb 2025 12:07:53 +0800
Subject: [PATCH] add llmchat online package

---
 ai/Kconfig              |  1 +
 ai/llmchat/Kconfig      | 91 +++++++++++++++++++++++++++++++++++++++++
 ai/llmchat/package.json | 32 +++++++++++++++
 3 files changed, 124 insertions(+)
 create mode 100644 ai/llmchat/Kconfig
 create mode 100644 ai/llmchat/package.json

diff --git a/ai/Kconfig b/ai/Kconfig
index e907e45959..233db460ac 100644
--- a/ai/Kconfig
+++ b/ai/Kconfig
@@ -11,5 +11,6 @@ source "$PKGS_DIR/packages/ai/quest/Kconfig"
 source "$PKGS_DIR/packages/ai/naxos/Kconfig"
 source "$PKGS_DIR/packages/ai/ncnn/Kconfig"
 source "$PKGS_DIR/packages/ai/r-tinymaix/Kconfig"
+source "$PKGS_DIR/packages/ai/llmchat/Kconfig"
 
 endmenu
diff --git a/ai/llmchat/Kconfig b/ai/llmchat/Kconfig
new file mode 100644
index 0000000000..c2ef716ad2
--- /dev/null
+++ b/ai/llmchat/Kconfig
@@ -0,0 +1,91 @@
+
+# Kconfig file for package llmchat
+menuconfig PKG_USING_LLMCHAT
+    bool "Large Language Models (LLM) for RT-Thread"
+    select PKG_USING_CJSON
+    select PKG_USING_WEBCLIENT
+    select PKG_USING_MBEDTLS
+    select PKG_USING_MBEDTLS_DIGICERT_ROOT_CA
+    select PKG_USING_MBEDTLS_CLOBALSIGN_ROOT_CA
+    default n
+
+if PKG_USING_LLMCHAT
+
+    config PKG_USING_LLMCHAT_PATH
+        string
+        default "/packages/ai/llm"
+
+    choice
+        prompt "Select LLM model"
+        default PKG_LLM_USING_QWEN_CLOUD
+        config PKG_LLM_USING_DOUBAO_CLOUD
+            bool "Doubao LLM AI port"
+        config PKG_LLM_USING_QWEN_CLOUD
+            bool "Qwen LLM AI port"
+    endchoice
+
+    if PKG_LLM_USING_QWEN_CLOUD
+        config PKG_LLM_QWEN_API_KEY
+            string "Qwen LLM user API key"
+            default "sk-**-**"
+            help
+                Qwen LLM API key.
+
+        config PKG_LLM_QWEN_MODEL_NAME
+            string "Qwen LLM model name"
+            default "qwen-plus"
+            help
+                Qwen LLM model name.
+
+        config PKG_LLM_QWEN_API_URL
+            string "Qwen LLM model API URL"
+            default "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions"
+            help
+                Qwen LLM model API URL.
+    endif
+
+    if PKG_LLM_USING_DOUBAO_CLOUD
+        config PKG_LLM_DOUBAO_API_KEY
+            string "Doubao LLM user API key"
+            default "f4de4d**-**"
+            help
+                Doubao LLM API key.
+
+        config PKG_LLM_DOUBAO_MODEL_ID
+            string "Doubao LLM model ID"
+            default "ep-2024**-**"
+            help
+                Doubao inference endpoint (access point) ID.
+
+        config PKG_LLM_DOUBAO_API_URL
+            string "Doubao LLM model API URL"
+            default "https://ark.cn-beijing.volces.com/api/v3/chat/completions"
+            help
+                Doubao LLM model API URL.
+    endif
+
+    choice
+        prompt "Version"
+        default PKG_USING_LLMCHAT_V10000
+        help
+            Select the llmchat version
+
+        config PKG_USING_LLMCHAT_V10000
+            bool "v1.0.0"
+
+        config PKG_USING_LLMCHAT_LATEST_VERSION
+            bool "latest"
+
+    endchoice
+
+    config PKG_USING_LLMCHAT_VER
+        string
+        default "v1.0.0" if PKG_USING_LLMCHAT_V10000
+        default "latest" if PKG_USING_LLMCHAT_LATEST_VERSION
+
+    config PKG_USING_LLMCHAT_VER_NUM
+        hex
+        default 0x99999 if PKG_USING_LLMCHAT_LATEST_VERSION
+        default 0x10000 if PKG_USING_LLMCHAT_V10000
+
+endif
diff --git a/ai/llmchat/package.json b/ai/llmchat/package.json
new file mode 100644
index 0000000000..f4f443ecc1
--- /dev/null
+++ b/ai/llmchat/package.json
@@ -0,0 +1,32 @@
+{
+    "name": "llmchat",
+    "description": "Chat with LLM large language models on RT-Thread.",
+    "description_zh": "在 RT-Thread 上进行 LLM 大语言模型对话",
+    "enable": "PKG_USING_LLMCHAT",
+    "keywords": [
+        "ai",
+        "llm"
+    ],
+    "category": "ai",
+    "author": {
+        "name": "Rbb666",
+        "email": "751061401@qq.com",
+        "github": "https://github.com/Rbb666"
+    },
+    "license": "MIT",
+    "repository": "https://github.com/Rbb666/llm_chat",
+    "homepage": "https://github.com/Rbb666/llm_chat#readme",
+    "site": [
+        {
+            "version": "v1.0.0",
+            "URL": "https://github.com/Rbb666/llm_chat/archive/refs/tags/1.0.0.zip",
+            "filename": "llm_chat-1.0.0.zip"
+        },
+        {
+            "version": "latest",
+            "URL": "https://github.com/Rbb666/llm_chat.git",
+            "filename": "",
+            "VER_SHA": "main"
+        }
+    ]
+}