// Demonstrates how to use ContextualCompressionRetriever to post-process
// retrieved chunks with an LLM-based extractor.
import { NomicEmbeddings } from "./utils/embed.js";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import { ChatOllama } from "@langchain/ollama";
import { ContextualCompressionRetriever } from "langchain/retrievers/contextual_compression";
import { LLMChainExtractor } from "langchain/retrievers/document_compressors/chain_extract";

// The question used for both the baseline and the compressed retrieval,
// so the two result sets are directly comparable.
const QUERY = "茴香豆是做什么用的？";

// Load the source document and split it into small chunks so individual
// chunks are likely to contain partially-irrelevant text (which is what
// the compressor is meant to strip out).
const loader = new TextLoader("data/kong.txt");
const docs = await loader.load();

const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 64,
  chunkOverlap: 0,
});
const chunks = await splitter.splitDocuments(docs);

// Embed the chunks and index them in an in-memory vector store.
const embeddings = new NomicEmbeddings(3);
const vectorStore = new MemoryVectorStore(embeddings);
await vectorStore.addDocuments(chunks);

// Baseline: a plain similarity-search retriever returning the top 2 chunks.
const baseRetriever = vectorStore.asRetriever(2);
const baseResults = await baseRetriever.invoke(QUERY);
console.log(`压缩前的检索结果：`, baseResults);

// Wrap the baseline retriever with an LLM-driven compressor that extracts
// only the query-relevant portions of each retrieved chunk.
const model = new ChatOllama({
  model: "llama3",
  temperature: 0.7,
});

const compressionRetriever = new ContextualCompressionRetriever({
  baseRetriever,
  baseCompressor: LLMChainExtractor.fromLLM(model),
});

const compressedResults = await compressionRetriever.invoke(QUERY);
console.log(`压缩后的检索结果：`, compressedResults);
