import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama";
import { Chroma } from "@langchain/community/vectorstores/chroma";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ChatOllama } from "@langchain/community/chat_models/ollama";

// Embedding model (m3e, a Chinese-text embedding model) served by a remote
// Ollama instance; used to vectorize the query for similarity search.
const embeddingRequestOptions = {
    useMMap: true, // memory-map model weights instead of loading fully into RAM
    numThread: 6, // CPU threads used for embedding
    // numGpu: 1,
};

const embeddings = new OllamaEmbeddings({
    model: "milkey/m3e",
    baseUrl: "http://117.72.38.226:11434",
    requestOptions: embeddingRequestOptions,
});

// Chat model (Qwen 4B) served by the same remote Ollama instance; answers
// the user's question grounded on the retrieved document chunk.
const qwen = new ChatOllama({
    model: "qwen:4b",
    baseUrl: "http://117.72.38.226:11434",
});
// The user's question, used to retrieve relevant document chunks.
const question = "有一个叫何重洋的程序员?他手机号多少？";

// Chroma vector store holding the previously ingested document chunks.
// NOTE(review): collection name looks like a snowflake-style ID — presumably
// keyed to a user/dataset; confirm against the ingestion side.
const vectorStore = new Chroma(embeddings, {
    collectionName: "1790011348373999616",
    url: "http://117.72.38.226:8000",
});
// Retrieve the single most similar document chunk and time the lookup.
const retrievalStart = Date.now();
const storyChunks = await vectorStore.similaritySearch(question, 1);
const retrievalMs = Date.now() - retrievalStart;
console.log(storyChunks);
console.log("检索文档片段耗时：", retrievalMs, " 毫秒");
const s3 = Date.now()
///////////////// 下面开始提问
// 形成一个聊天模板
// const chatPrompt = ChatPromptTemplate.fromMessages([
//     ["system", "你是一个私人信息助理。请根据文档提示信息回答用户的问题.文档内容如下：{docs}"],
//   ]);
// const formattedChatPrompt = await chatPrompt.formatMessages({
//     docs: storyChunks[0].pageContent,
//     question: "何重洋今年几岁？",
// });

// const answerChain = chatPrompt.pipe(qwen)

// const result =  await answerChain.invoke({
//     doc:formattedChatPrompt
//   });
const prompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    `你是一个私人信息助理。请根据文档提示信息回答用户的问题.文档内容如下：{docs}`,
  ],
  ["user", `请告诉我{question}`],
]);
const chain = prompt.pipe(qwen);
const result = await chain.invoke({
  docs: storyChunks[0].pageContent,
  question: "何重洋今年几岁？",
});

console.log(result);

const s4 = Date.now()
console.log(result)
console.log("llm回答问题耗时：",s4-s3," 毫秒")
