anything-llm/collector/processSingleFile/convert/asDocx.js
Sean Hatfield a87014822a
[REFACTOR] Improve asPDF collector processor with pdfjs (#1791)
* WIP replace langchain pdfloader with pdfjs and add more context to each page

* remove extras from pdfjs and just replace langchain library

* remove unneeded dep

* fix console log in docs

---------

Co-authored-by: timothycarambat <rambat1010@gmail.com>
2024-07-03 14:26:48 -07:00

58 lines
1.6 KiB
JavaScript

const { v4 } = require("uuid");
const { DocxLoader } = require("langchain/document_loaders/fs/docx");
const {
createdDate,
trashFile,
writeToServerDocuments,
} = require("../../utils/files");
const { tokenizeString } = require("../../utils/tokenizer");
const { default: slugify } = require("slugify");
/**
 * Converts a local .docx file into the collector's document format and writes
 * it to the server documents folder so it is ready for embedding.
 *
 * @param {Object} args
 * @param {string} [args.fullFilePath=""] - Absolute path of the uploaded .docx file on disk.
 * @param {string} [args.filename=""] - Original filename, used for title/slug and log messages.
 * @returns {Promise<{success: boolean, reason: string|null, documents: Object[]}>}
 *   `success: false` with an empty `documents` array when no text could be extracted.
 */
async function asDocX({ fullFilePath = "", filename = "" }) {
  const loader = new DocxLoader(fullFilePath);
  // BUGFIX: these log/reason strings used `$(unknown)` — `$()` is not template
  // interpolation in JS, so the literal text was printed instead of the filename.
  console.log(`-- Working ${filename} --`);

  let pageContent = [];
  const docs = await loader.load();
  for (const doc of docs) {
    console.log(`-- Parsing content from docx page --`);
    // Skip empty pages so blank sections don't pollute the joined content.
    if (!doc.pageContent.length) continue;
    pageContent.push(doc.pageContent);
  }

  if (!pageContent.length) {
    console.error(`Resulting text content was empty for ${filename}.`);
    // Always remove the uploaded temp file, even on failure.
    trashFile(fullFilePath);
    return {
      success: false,
      reason: `No text content found in ${filename}.`,
      documents: [],
    };
  }

  const content = pageContent.join("");
  const data = {
    id: v4(),
    url: "file://" + fullFilePath,
    title: filename,
    docAuthor: "no author found",
    description: "No description found.",
    // BUGFIX: previously said "pdf file uploaded by the user." — copy-paste
    // from the PDF converter; this processor handles docx files.
    docSource: "docx file uploaded by the user.",
    chunkSource: "",
    published: createdDate(fullFilePath),
    wordCount: content.split(" ").length,
    pageContent: content,
    token_count_estimate: tokenizeString(content).length,
  };

  const document = writeToServerDocuments(
    data,
    `${slugify(filename)}-${data.id}`
  );
  // Clean up the uploaded temp file now that its content has been persisted.
  trashFile(fullFilePath);
  console.log(`[SUCCESS]: ${filename} converted & ready for embedding.\n`);
  return { success: true, reason: null, documents: [document] };
}

module.exports = asDocX;