anything-llm/server/models/cacheData.js

const prisma = require("../utils/prisma");

// Thin model wrapper around the `cache_data` table. Every method swallows
// Prisma errors, logs them, and returns a safe fallback value so callers
// do not need their own try/catch.
const CacheData = {
  // Insert a new cache record built from `inputs`.
  new: async function (inputs = {}) {
    try {
      const cache = await prisma.cache_data.create({
        data: inputs,
      });
      return { cache, message: null };
    } catch (error) {
      console.error(error.message);
      return { cache: null, message: error.message };
    }
  },

  // Return the first record matching `clause`, or null when none exists.
  get: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const cache = await prisma.cache_data.findFirst({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return cache || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },

  // Delete every record matching `clause`. Returns true on success.
  delete: async function (clause = {}) {
    try {
      await prisma.cache_data.deleteMany({
        where: clause,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },

  // Return all records matching `clause`, optionally limited and ordered.
  where: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const caches = await prisma.cache_data.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return caches;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },

  // Count the records matching `clause`.
  count: async function (clause = {}) {
    try {
      const count = await prisma.cache_data.count({
        where: clause,
      });
      return count;
    } catch (error) {
      console.error(error.message);
      return 0;
    }
  },
};

module.exports = { CacheData };
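
For illustration, a minimal usage sketch of this model from a hypothetical caller elsewhere in the server. The `name` and `data` fields and the `createdAt` ordering are assumptions about the `cache_data` schema, not confirmed by this file:

// Hypothetical caller (not part of this file). Field names below are
// assumed; check the Prisma schema for the real `cache_data` columns.
const { CacheData } = require("../models/cacheData");

async function example() {
  // Store a computed value under a lookup key.
  const { cache, message } = await CacheData.new({
    name: "expensive-result", // assumed column
    data: JSON.stringify({ value: 42 }), // assumed column
  });
  if (!cache) console.error("Failed to cache:", message);

  // Fetch the most recent matching record (orderBy is passed straight
  // through to Prisma).
  const hit = await CacheData.get(
    { name: "expensive-result" },
    null,
    { createdAt: "desc" } // assumed column
  );
  if (hit) console.log(JSON.parse(hit.data).value); // 42

  // Invalidate all records for this key.
  await CacheData.delete({ name: "expensive-result" });
}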