Sean Hatfield 2024-04-26 17:08:45 -07:00 committed by GitHub
commit 34027c6e18
8 changed files with 343 additions and 0 deletions

View File

@@ -86,6 +86,23 @@ function extensions(app) {
    }
  );

  app.post(
    "/ext/website-depth",
    [verifyPayloadIntegrity],
    async function (request, response) {
      try {
        const websiteDepth = require("../utils/extensions/WebsiteDepth");
        const { url, depth, maxLinks } = reqBody(request);
        const scrapedData = await websiteDepth(url, depth, maxLinks);
        response.status(200).json({ success: true, data: scrapedData });
      } catch (e) {
        console.error(e);
        response.status(400).json({ success: false, reason: e.message });
      }
      return;
    }
  );

  app.post(
    "/ext/confluence",
    [verifyPayloadIntegrity],
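
For reference, a minimal sketch of the payload this new collector route expects and the responses it produces; the field names come from the handler above, the values are illustrative only:

// Request body (parsed by reqBody(request) in the handler above):
const examplePayload = {
  url: "https://example.com", // starting page to crawl
  depth: 2,                   // how many levels of links to follow
  maxLinks: 20,               // cap on the total number of pages visited
};

// Responses:
//   200 -> { success: true, data: [/* documents returned by websiteDepth() */] }
//   400 -> { success: false, reason: "<error message>" }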

View File

@@ -0,0 +1,141 @@
const { v4 } = require("uuid");
const {
  PuppeteerWebBaseLoader,
} = require("langchain/document_loaders/web/puppeteer");
const { default: slugify } = require("slugify");
const { parse } = require("node-html-parser");
const { writeToServerDocuments } = require("../../files");
const { tokenizeString } = require("../../tokenizer");
const path = require("path");
const fs = require("fs");

// Crawl `startUrl` and the links found on its pages, up to `depth` levels deep
// and at most `maxLinks` pages, writing each page out as a server document.
async function websiteDepth(startUrl, depth = 1, maxLinks = 20) {
  const scrapedData = [];
  const visitedUrls = new Set();
  const websiteName = new URL(startUrl).hostname;
  const outputFolder = path.resolve(
    __dirname,
    `../../../../server/storage/documents/${slugify(websiteName)}`
  );

  if (!fs.existsSync(outputFolder)) {
    fs.mkdirSync(outputFolder, { recursive: true });
  }
  // Depth-first crawl: stop once the depth or link budget is exhausted,
  // or when a URL has already been visited.
  async function scrapeLevel(currentLink, currentLevel) {
    if (
      currentLevel > depth ||
      visitedUrls.has(currentLink) ||
      visitedUrls.size >= maxLinks
    )
      return;

    visitedUrls.add(currentLink);
    console.log(`-- Working URL ${currentLink} --`);

    const content = await getPageContent(currentLink);
    if (!content?.length) {
      console.error(`Resulting URL content was empty at ${currentLink}.`);
      return;
    }

    const url = new URL(currentLink);
    const filename = (url.host + "-" + url.pathname).replace(".", "_");
    const data = {
      id: v4(),
      url: "file://" + slugify(filename) + ".html",
      title: slugify(filename) + ".html",
      docAuthor: "no author found",
      description: "No description found.",
      docSource: "URL link uploaded by the user.",
      chunkSource: `link://${currentLink}`,
      published: new Date().toLocaleString(),
      wordCount: content.split(" ").length,
      pageContent: content,
      token_count_estimate: tokenizeString(content).length,
    };
    scrapedData.push(data);

    // Follow same-origin links found on this page, one level deeper.
    const links = extractLinks(await getPageHTML(currentLink), url.origin);
    for (const link of links) {
      if (visitedUrls.size >= maxLinks) break;
      await scrapeLevel(link, currentLevel + 1);
    }
  }

  await scrapeLevel(startUrl, 0);

  for (const data of scrapedData) {
    const document = writeToServerDocuments(
      data,
      `${data.title}`,
      outputFolder
    );
    console.log(
      `[SUCCESS]: URL ${data.chunkSource} converted & ready for embedding.\n`
    );
  }

  return scrapedData;
}

// Render the page with Puppeteer and return its visible text, or null on failure.
async function getPageContent(link) {
  try {
    const loader = new PuppeteerWebBaseLoader(link, {
      launchOptions: { headless: "new" },
      gotoOptions: { waitUntil: "domcontentloaded" },
      async evaluate(page, browser) {
        const result = await page.evaluate(() => document.body.innerText);
        await browser.close();
        return result;
      },
    });

    const docs = await loader.load();
    return docs[0].pageContent;
  } catch (error) {
    console.error("getPageContent failed to be fetched by Puppeteer.", error);
    return null;
  }
}

// Same as getPageContent, but returns the raw HTML body so links can be extracted.
async function getPageHTML(link) {
  try {
    const loader = new PuppeteerWebBaseLoader(link, {
      launchOptions: { headless: "new" },
      gotoOptions: { waitUntil: "domcontentloaded" },
      async evaluate(page, browser) {
        const result = await page.evaluate(() => document.body.innerHTML);
        await browser.close();
        return result;
      },
    });

    const docs = await loader.load();
    return docs[0].pageContent;
  } catch (error) {
    console.error("getPageHTML failed to be fetched by Puppeteer.", error);
    return null;
  }
}

// Collect all hrefs on the page that are relative or share the crawl origin,
// normalized to absolute, de-duplicated URLs.
function extractLinks(html, baseUrl) {
  const root = parse(html);
  const links = root.querySelectorAll("a");
  const extractedLinks = new Set();

  for (const link of links) {
    const href = link.getAttribute("href");
    if (href && (href.startsWith("/") || href.startsWith(baseUrl))) {
      const absoluteUrl = new URL(href, baseUrl).href;
      extractedLinks.add(absoluteUrl);
    }
  }

  return Array.from(extractedLinks);
}

module.exports = websiteDepth;
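
A minimal usage sketch of the helper exported above, assuming it is run from inside the collector package so the relative require and the Puppeteer/langchain dependencies resolve; the script location and URL are illustrative only:

// e.g. collector/scripts/crawl-example.js (hypothetical location)
const websiteDepth = require("../utils/extensions/WebsiteDepth");

(async () => {
  // Crawl the start page plus one level of same-origin links, capped at 10 pages.
  const docs = await websiteDepth("https://example.com", 1, 10);
  console.log(`Scraped ${docs.length} page(s)`);
  // Each entry mirrors the `data` object built in scrapeLevel(), and a copy has
  // already been written under server/storage/documents/<hostname slug>/.
})();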

View File

@@ -1,10 +1,12 @@
import Github from "./github.svg";
import YouTube from "./youtube.svg";
import Link from "./link.svg";
import Confluence from "./confluence.jpeg";

const ConnectorImages = {
  github: Github,
  youtube: YouTube,
  websiteDepth: Link,
  confluence: Confluence,
};

File diff suppressed because one or more lines are too long (image preview: 9.5 KiB)

View File

@@ -0,0 +1,135 @@
import React, { useState } from "react";
import System from "@/models/system";
import showToast from "@/utils/toast";
import pluralize from "pluralize";

export default function WebsiteDepthOptions() {
  const [loading, setLoading] = useState(false);

  const handleSubmit = async (e) => {
    e.preventDefault();
    const form = new FormData(e.target);

    try {
      setLoading(true);
      showToast("Scraping website - this may take a while.", "info", {
        clear: true,
        autoClose: false,
      });

      const { data, error } = await System.dataConnectors.websiteDepth.scrape({
        url: form.get("url"),
        depth: parseInt(form.get("depth")),
        maxLinks: parseInt(form.get("maxLinks")),
      });

      console.log({ data, error });

      if (!!error) {
        showToast(error, "error", { clear: true });
        setLoading(false);
        return;
      }

      showToast(
        `Successfully scraped ${data.length} ${pluralize(
          "page",
          data.length
        )}!`,
        "success",
        { clear: true }
      );
      e.target.reset();
      setLoading(false);
    } catch (e) {
      console.error(e);
      showToast(e.message, "error", { clear: true });
      setLoading(false);
    }
  };

  return (
    <div className="flex w-full">
      <div className="flex flex-col w-full px-1 md:pb-6 pb-16">
        <form className="w-full" onSubmit={handleSubmit}>
          <div className="w-full flex flex-col py-2">
            <div className="w-full flex flex-col gap-4">
              <div className="flex flex-col pr-10">
                <div className="flex flex-col gap-y-1 mb-4">
                  <label className="text-white text-sm font-bold">
                    Website URL
                  </label>
                  <p className="text-xs font-normal text-white/50">
                    URL of the website you want to scrape.
                  </p>
                </div>
                <input
                  type="url"
                  name="url"
                  className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
                  placeholder="https://example.com"
                  required={true}
                  autoComplete="off"
                  spellCheck={false}
                />
              </div>
              <div className="flex flex-col pr-10">
                <div className="flex flex-col gap-y-1 mb-4">
                  <label className="text-white text-sm font-bold">Depth</label>
                  <p className="text-xs font-normal text-white/50">
                    Depth of the website scraping (number of levels to scrape).
                  </p>
                </div>
                <input
                  type="number"
                  name="depth"
                  min="1"
                  max="5"
                  className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
                  required={true}
                  defaultValue="1"
                />
              </div>
              <div className="flex flex-col pr-10">
                <div className="flex flex-col gap-y-1 mb-4">
                  <label className="text-white text-sm font-bold">
                    Max Links
                  </label>
                  <p className="text-xs font-normal text-white/50">
                    Maximum number of links to scrape.
                  </p>
                </div>
                <input
                  type="number"
                  name="maxLinks"
                  min="1"
                  className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
                  required={true}
                  defaultValue="20"
                />
              </div>
            </div>
          </div>
          <div className="flex flex-col gap-y-2 w-full pr-10">
            <button
              type="submit"
              disabled={loading}
              className={`mt-2 w-full ${
                loading ? "cursor-not-allowed animate-pulse" : ""
              } justify-center border border-slate-200 px-4 py-2 rounded-lg text-[#222628] text-sm font-bold items-center flex gap-x-2 bg-slate-200 hover:bg-slate-300 hover:text-slate-800 disabled:bg-slate-300 disabled:cursor-not-allowed`}
            >
              {loading ? "Scraping website..." : "Submit"}
            </button>
            {loading && (
              <p className="text-xs text-white/50">
                Once complete, all scraped pages will be available for embedding
                into workspaces in the document picker.
              </p>
            )}
          </div>
        </form>
      </div>
    </div>
  );
}

View File

@@ -5,6 +5,7 @@ import YoutubeOptions from "./Connectors/Youtube";
import ConfluenceOptions from "./Connectors/Confluence";
import { useState } from "react";
import ConnectorOption from "./ConnectorOption";
import WebsiteDepthOptions from "./Connectors/WebsiteDepth";

export const DATA_CONNECTORS = {
  github: {
@@ -21,6 +22,13 @@ export const DATA_CONNECTORS = {
      "Import the transcription of an entire YouTube video from a link.",
    options: <YoutubeOptions />,
  },
  "website-depth": {
    name: "Website Depth",
    image: ConnectorImages.websiteDepth,
    description:
      "Scrape a website and the links it contains, up to a certain depth.",
    options: <WebsiteDepthOptions />,
  },
  confluence: {
    name: "Confluence",
    image: ConnectorImages.confluence,

View File

@@ -60,6 +60,24 @@ const DataConnector = {
        });
    },
  },
  websiteDepth: {
    scrape: async ({ url, depth, maxLinks }) => {
      return await fetch(`${API_BASE}/ext/website-depth`, {
        method: "POST",
        headers: baseHeaders(),
        body: JSON.stringify({ url, depth, maxLinks }),
      })
        .then((res) => res.json())
        .then((res) => {
          if (!res.success) throw new Error(res.reason);
          return { data: res.data, error: null };
        })
        .catch((e) => {
          console.error(e);
          return { data: null, error: e.message };
        });
    },
  },
  confluence: {
    collect: async function ({ pageUrl, username, accessToken }) {
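
Callers of the new frontend method get a `{ data, error }` pair back rather than a thrown exception, since failures are caught and converted above. A minimal sketch of consuming it, using the same System.dataConnectors wiring the WebsiteDepth component uses; the values are illustrative:

// Inside an async handler in a React component.
const { data, error } = await System.dataConnectors.websiteDepth.scrape({
  url: "https://docs.example.com",
  depth: 2,
  maxLinks: 50,
});
if (error) {
  console.error("Scrape failed:", error);
} else {
  console.log(`Scraped ${data.length} page(s) ready for embedding.`);
}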

View File

@@ -93,6 +93,27 @@ function extensionEndpoints(app) {
      }
    }
  );

  app.post(
    "/ext/website-depth",
    [validatedRequest, flexUserRoleValid([ROLES.admin, ROLES.manager])],
    async (request, response) => {
      try {
        const responseFromProcessor =
          await new CollectorApi().forwardExtensionRequest({
            endpoint: "/ext/website-depth",
            method: "POST",
            body: request.body,
          });
        await Telemetry.sendTelemetry("extension_invoked", {
          type: "website_depth",
        });
        response.status(200).json(responseFromProcessor);
      } catch (e) {
        console.error(e);
        response.sendStatus(500).end();
      }
    }
  );
}
module.exports = { extensionEndpoints };
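
The server route above is the one the UI actually calls; validatedRequest plus the admin/manager role check gate it before the body is forwarded to the collector. A rough sketch of hitting it directly follows — the /api prefix, port 3001, and the Bearer-token header are assumptions about the surrounding server setup, so match whatever baseHeaders() sends in your deployment:

// Node 18+ global fetch; endpoint, port, and token are illustrative assumptions.
const token = "<a token accepted by validatedRequest>";

(async () => {
  const res = await fetch("http://localhost:3001/api/ext/website-depth", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${token}`,
    },
    body: JSON.stringify({ url: "https://example.com", depth: 1, maxLinks: 20 }),
  });
  const result = await res.json(); // forwarded collector response: { success, data | reason }
  console.log(result);
})();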