// 演示 MultiQueryRetriever 基本使用
import { NomicEmbeddings } from "./utils/embed.js";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { TextLoader } from "langchain/document_loaders/fs/text";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import { MultiQueryRetriever } from "langchain/retrievers/multi_query";
import { ChatOllama } from "@langchain/ollama";

// Demo: basic usage of MultiQueryRetriever.
// Pipeline: load text -> split into chunks -> embed into an in-memory
// vector store -> wrap the base retriever with LLM-driven query expansion.

// Load the source document from disk.
const rawDocuments = await new TextLoader("data/kong.txt").load();

// Split into small chunks so each embedding covers a focused span of text.
const textSplitter = new RecursiveCharacterTextSplitter({
  chunkSize: 64,
  chunkOverlap: 0,
});
const chunks = await textSplitter.splitDocuments(rawDocuments);

// Embed the chunks and index them in an in-memory vector store.
const embedder = new NomicEmbeddings(3);
const vectorStore = new MemoryVectorStore(embedder);
await vectorStore.addDocuments(chunks);

// Base retriever: return the top 2 most similar chunks per query.
const baseRetriever = vectorStore.asRetriever(2);

// Chat model used to rewrite the user's question into alternative queries.
const chatModel = new ChatOllama({
  model: "llama3",
  temperature: 0.7,
});

// MultiQueryRetriever: expands the query into several rewrites via the LLM,
// runs each rewrite against the base retriever, and merges the hits.
const multiQueryRetriever = MultiQueryRetriever.fromLLM({
  llm: chatModel, // model that generates the query rewrites
  retriever: baseRetriever, // underlying similarity retriever
  queryCount: 3, // number of alternative queries to generate
  verbose: true, // enable debug logging
});

const results = await multiQueryRetriever.invoke("茴香豆是做什么用的？");
console.log(results);
