diff --git a/collector/index.js b/collector/index.js
index b307b58a45a..73091efcdee 100644
--- a/collector/index.js
+++ b/collector/index.js
@@ -62,9 +62,13 @@ app.post(
   "/process-link",
   [verifyPayloadIntegrity],
   async function (request, response) {
-    const { link } = reqBody(request);
+    const { link, scraperHeaders = {} } = reqBody(request);
     try {
-      const { success, reason, documents = [] } = await processLink(link);
+      const {
+        success,
+        reason,
+        documents = [],
+      } = await processLink(link, scraperHeaders);
       response.status(200).json({ url: link, success, reason, documents });
     } catch (e) {
       console.error(e);
diff --git a/collector/processLink/convert/generic.js b/collector/processLink/convert/generic.js
index a22166d4ce2..fd85d6a61ff 100644
--- a/collector/processLink/convert/generic.js
+++ b/collector/processLink/convert/generic.js
@@ -8,18 +8,25 @@ const { default: slugify } = require("slugify");
 
 /**
  * Scrape a generic URL and return the content in the specified format
- * @param {string} link - The URL to scrape
- * @param {('html' | 'text')} captureAs - The format to capture the page content as
- * @param {boolean} processAsDocument - Whether to process the content as a document or return the content directly
+ * @param {Object} config - The configuration object
+ * @param {string} config.link - The URL to scrape
+ * @param {('html' | 'text')} config.captureAs - The format to capture the page content as. Default is 'text'
+ * @param {boolean} config.processAsDocument - Whether to process the content as a document or return the content directly. Default is true
+ * @param {{[key: string]: string}} config.scraperHeaders - Custom headers to use when making the request
 * @returns {Promise} - The content of the page
 */
-async function scrapeGenericUrl(
+async function scrapeGenericUrl({
   link,
   captureAs = "text",
-  processAsDocument = true
-) {
+  processAsDocument = true,
+  scraperHeaders = {},
+}) {
   console.log(`-- Working URL ${link} => (${captureAs}) --`);
-  const content = await getPageContent(link, captureAs);
+  const content = await getPageContent({
+    link,
+    captureAs,
+    headers: scraperHeaders,
+  });
 
   if (!content.length) {
     console.error(`Resulting URL content was empty at ${link}.`);
@@ -63,13 +70,38 @@ async function scrapeGenericUrl(
   return { success: true, reason: null, documents: [document] };
 }
 
+/**
+ * Validate the headers object
+ * - Keys & values must be non-empty strings
+ * - Assemble a new object with only the valid keys and values
+ * @param {{[key: string]: string}} headers - The headers object to validate
+ * @returns {{[key: string]: string}} - The validated headers object
+ */
+function validatedHeaders(headers = {}) {
+  try {
+    if (Object.keys(headers).length === 0) return {};
+    let validHeaders = {};
+    for (const key of Object.keys(headers)) {
+      if (!key?.trim()) continue;
+      if (typeof headers[key] !== "string" || !headers[key]?.trim()) continue;
+      validHeaders[key] = headers[key].trim();
+    }
+    return validHeaders;
+  } catch (error) {
+    console.error("Error validating headers", error);
+    return {};
+  }
+}
+
 /**
  * Get the content of a page
- * @param {string} link - The URL to get the content of
- * @param {('html' | 'text')} captureAs - The format to capture the page content as
+ * @param {Object} config - The configuration object
+ * @param {string} config.link - The URL to get the content of
+ * @param {('html' | 'text')} config.captureAs - The format to capture the page content as. Default is 'text'
+ * @param {{[key: string]: string}} config.headers - Custom headers to use when making the request
  * @returns {Promise} - The content of the page
  */
-async function getPageContent(link, captureAs = "text") {
+async function getPageContent({ link, captureAs = "text", headers = {} }) {
   try {
     let pageContents = [];
     const loader = new PuppeteerWebBaseLoader(link, {
@@ -91,12 +123,37 @@ async function getPageContent(link, captureAs = "text") {
       },
     });
 
-    const docs = await loader.load();
+    // Override scrape method if headers are available
+    let overrideHeaders = validatedHeaders(headers);
+    if (Object.keys(overrideHeaders).length > 0) {
+      loader.scrape = async function () {
+        const { launch } = await PuppeteerWebBaseLoader.imports();
+        const browser = await launch({
+          headless: "new",
+          defaultViewport: null,
+          ignoreDefaultArgs: ["--disable-extensions"],
+          ...this.options?.launchOptions,
+        });
+        const page = await browser.newPage();
+        await page.setExtraHTTPHeaders(overrideHeaders);
+
+        await page.goto(this.webPath, {
+          timeout: 180000,
+          waitUntil: "networkidle2",
+          ...this.options?.gotoOptions,
+        });
 
-    for (const doc of docs) {
-      pageContents.push(doc.pageContent);
+        const bodyHTML = this.options?.evaluate
+          ? await this.options.evaluate(page, browser)
+          : await page.evaluate(() => document.body.innerHTML);
+
+        await browser.close();
+        return bodyHTML;
+      };
     }
+    const docs = await loader.load();
+    for (const doc of docs) pageContents.push(doc.pageContent);
 
     return pageContents.join(" ");
   } catch (error) {
     console.error(
@@ -112,6 +169,7 @@
         "Content-Type": "text/plain",
         "User-Agent":
           "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36,gzip(gfe)",
+        ...validatedHeaders(headers),
       },
     }).then((res) => res.text());
     return pageText;
diff --git a/collector/processLink/index.js b/collector/processLink/index.js
index ac0c5916b07..819b1863f94 100644
--- a/collector/processLink/index.js
+++ b/collector/processLink/index.js
@@ -1,20 +1,37 @@
 const { validURL } = require("../utils/url");
 const { scrapeGenericUrl } = require("./convert/generic");
 
-async function processLink(link) {
+/**
+ * Process a link and return the text content. This util will save the link as a document
+ * so it can be used for embedding later.
+ * @param {string} link - The link to process
+ * @param {{[key: string]: string}} scraperHeaders - Custom headers to apply when scraping the link
+ * @returns {Promise<{success: boolean, reason: string|null, documents: Object[]}>} - Response from collector
+ */
+async function processLink(link, scraperHeaders = {}) {
   if (!validURL(link)) return { success: false, reason: "Not a valid URL." };
-  return await scrapeGenericUrl(link);
+  return await scrapeGenericUrl({
+    link,
+    captureAs: "text",
+    processAsDocument: true,
+    scraperHeaders,
+  });
 }
 
 /**
- * Get the text content of a link
+ * Get the text content of a link - does not save the link as a document.
+ * Mostly used in agentic flows/tool calls to get the text content of a link.
  * @param {string} link - The link to get the text content of
 * @param {('html' | 'text' | 'json')} captureAs - The format to capture the page content as
 * @returns {Promise<{success: boolean, content: string}>} - Response from collector
 */
 async function getLinkText(link, captureAs = "text") {
   if (!validURL(link)) return { success: false, reason: "Not a valid URL." };
-  return await scrapeGenericUrl(link, captureAs, false);
+  return await scrapeGenericUrl({
+    link,
+    captureAs,
+    processAsDocument: false,
+  });
 }
 
 module.exports = {
diff --git a/server/endpoints/api/document/index.js b/server/endpoints/api/document/index.js
index 56175142052..0795be7a3f8 100644
--- a/server/endpoints/api/document/index.js
+++ b/server/endpoints/api/document/index.js
@@ -322,7 +322,11 @@
             type: 'object',
             example: {
               "link": "https://anythingllm.com",
-              "addToWorkspaces": "workspace1,workspace2"
+              "addToWorkspaces": "workspace1,workspace2",
+              "scraperHeaders": {
+                "Authorization": "Bearer token123",
+                "My-Custom-Header": "value"
+              }
             }
           }
         }
@@ -365,7 +369,11 @@
     */
     try {
       const Collector = new CollectorApi();
-      const { link, addToWorkspaces = "" } = reqBody(request);
+      const {
+        link,
+        addToWorkspaces = "",
+        scraperHeaders = {},
+      } = reqBody(request);
       const processingOnline = await Collector.online();
 
       if (!processingOnline) {
@@ -379,8 +387,10 @@
         return;
       }
 
-      const { success, reason, documents } =
-        await Collector.processLink(link);
+      const { success, reason, documents } = await Collector.processLink(
+        link,
+        scraperHeaders
+      );
       if (!success) {
         response
           .status(500)
diff --git a/server/swagger/openapi.json b/server/swagger/openapi.json
index 3ae58b0e8af..15b041c0878 100644
--- a/server/swagger/openapi.json
+++ b/server/swagger/openapi.json
@@ -1092,7 +1092,11 @@
               "type": "object",
               "example": {
                 "link": "https://anythingllm.com",
-                "addToWorkspaces": "workspace1,workspace2"
+                "addToWorkspaces": "workspace1,workspace2",
+                "scraperHeaders": {
+                  "Authorization": "Bearer token123",
+                  "My-Custom-Header": "value"
+                }
               }
             }
           }
diff --git a/server/utils/collectorApi/index.js b/server/utils/collectorApi/index.js
index 29fc8f5dbac..d7953ce2209 100644
--- a/server/utils/collectorApi/index.js
+++ b/server/utils/collectorApi/index.js
@@ -101,12 +101,18 @@
    * Process a link
    * - Will append the options to the request body
    * @param {string} link - The link to process
+   * @param {{[key: string]: string}} scraperHeaders - Custom headers to apply to the web-scraping request
    * @returns {Promise} - The response from the collector API
    */
-  async processLink(link = "") {
+  async processLink(link = "", scraperHeaders = {}) {
     if (!link) return false;
 
-    const data = JSON.stringify({ link, options: this.#attachOptions() });
+    const data = JSON.stringify({
+      link,
+      scraperHeaders,
+      options: this.#attachOptions(),
+    });
+
     return await fetch(`${this.endpoint}/process-link`, {
       method: "POST",
      headers: {
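
End-to-end, the new `scraperHeaders` object flows from the public API through `CollectorApi.processLink` to the collector's `/process-link` route, and is finally applied to the headless-browser request via `page.setExtraHTTPHeaders`. A minimal sketch of a client call exercising the new field (not part of this diff; the base URL, route path, and all credential values are placeholder assumptions taken from the swagger example above):

// Hypothetical client call; route and tokens are assumptions, not confirmed by this diff.
const res = await fetch("http://localhost:3001/api/v1/document/upload-link", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: "Bearer YOUR_API_KEY", // placeholder AnythingLLM API key
  },
  body: JSON.stringify({
    link: "https://anythingllm.com",
    addToWorkspaces: "workspace1,workspace2",
    // Forwarded to the collector and set on the scraping request.
    scraperHeaders: {
      Authorization: "Bearer token123",
      "My-Custom-Header": "value",
    },
  }),
});
const result = await res.json(); // response body shape per the endpoint's swagger docs

Note that `validatedHeaders` silently drops any header whose key or value is not a non-empty string, so malformed entries degrade to an ordinary unauthenticated request rather than throwing.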