import path from "path";

import { pull } from "langchain/hub";
import { StateGraph, MessagesAnnotation } from "@langchain/langgraph";
import { ToolNode, toolsCondition } from "@langchain/langgraph/prebuilt";
import { ChatOllama, Ollama, OllamaEmbeddings } from "@langchain/ollama";
import {
  AIMessage,
  HumanMessage,
  SystemMessage,
  ToolMessage,
} from "@langchain/core/messages";
import { tool } from "@langchain/core/tools";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters";
import { DocxLoader } from "@langchain/community/document_loaders/fs/docx";
import { z } from "zod";

// LLM — must be the ChatOllama chat model: the plain `Ollama` completion
// class does not implement `.bindTools()`, which queryOrRespond relies on
// to let the model emit tool calls.
const model = new ChatOllama({
  baseUrl: process.env.MODEL_URL,
  model: "deepseek-r1:7b",
});
// Embedding model used to vectorize both document chunks and user queries.
const embedding = new OllamaEmbeddings({
  model: "nomic-embed-text:latest",
  baseUrl: process.env.MODEL_URL,
});
// In-memory vector store — non-persistent; rebuilt on every process start.
const memoStorage = new MemoryVectorStore(embedding);
// Chunking rule applied to documents before embedding.
const textSplitter = new RecursiveCharacterTextSplitter({
  chunkSize: 1000,
  chunkOverlap: 200,
});

// Expose the vector-store lookup as a tool so the model decides when to retrieve.
const retrieveSchema = z.object({ query: z.string() });
const searchContext = tool(
  async ({ query }) => {
    // BUG FIX: the original referenced an undefined `vectorStore`;
    // the store defined in this module is `memoStorage`.
    const retrievedDocs = await memoStorage.similaritySearch(query, 2);
    const serialized = retrievedDocs
      .map(
        (doc) => `Source: ${doc.metadata.source}\nContent: ${doc.pageContent}`
      )
      .join("\n");
    // content_and_artifact format: first element is the string fed to the
    // LLM, second is the raw documents kept as the tool artifact.
    return [serialized, retrievedDocs];
  },
  {
    name: "retrieve",
    description: "Retrieve information related to a query.",
    schema: retrieveSchema,
    responseFormat: "content_and_artifact",
  }
);
const tools = new ToolNode([searchContext]);

// First graph node: ask the model. It may answer directly or emit a
// tool call asking for retrieval from the vector store.
async function queryOrRespond(state) {
  const toolAwareModel = model.bindTools([searchContext]);
  const reply = await toolAwareModel.invoke(state.messages);
  return { messages: [reply] };
}

// Final node: build a grounded prompt from the latest retrieval results
// and ask the model for the answer.
async function generate(state) {
  // Collect the trailing run of ToolMessages — the most recent retrievals.
  const recentToolMessages = [];
  for (let i = state.messages.length - 1; i >= 0; i--) {
    const message = state.messages[i];
    if (message instanceof ToolMessage) {
      recentToolMessages.push(message);
    } else {
      break;
    }
  }
  const toolMessages = recentToolMessages.reverse();

  // Fold retrieved context into the system prompt.
  const docsContent = toolMessages.map((doc) => doc.content).join("\n");
  const systemMessageContent =
    "You are an assistant for question-answering tasks. " +
    "Use the following pieces of retrieved context to answer " +
    "the question. If you don't know the answer, say that you " +
    "don't know. Use three sentences maximum and keep the " +
    "answer concise." +
    "\n\n" +
    `${docsContent}`;

  // Keep human/system turns plus AI turns that did not call tools.
  // `tool_calls` may be undefined on some AIMessages, so guard with `?.`.
  const conversationMessages = state.messages.filter(
    (message) =>
      message instanceof HumanMessage ||
      message instanceof SystemMessage ||
      (message instanceof AIMessage && (message.tool_calls?.length ?? 0) === 0)
  );
  const prompt = [
    new SystemMessage(systemMessageContent),
    ...conversationMessages,
  ];

  // BUG FIX: the original invoked an undefined `llm`; this module's
  // model binding is `model`.
  const response = await model.invoke(prompt);
  return { messages: [response] };
}

// Wire the RAG graph: start -> queryOrRespond; if the model requested a
// tool, run it and generate a grounded answer; otherwise end immediately.
const workflow = new StateGraph(MessagesAnnotation);
workflow.addNode("queryOrRespond", queryOrRespond);
workflow.addNode("tools", tools);
workflow.addNode("generate", generate);
workflow.addEdge("__start__", "queryOrRespond");
workflow.addConditionalEdges("queryOrRespond", toolsCondition, {
  __end__: "__end__",
  tools: "tools",
});
workflow.addEdge("tools", "generate");
workflow.addEdge("generate", "__end__");
const graph = workflow.compile();

// Load the knowledge base, split it into chunks, and index it in the
// vector store. Runs at most once per process: the original re-embedded
// and re-added the whole document on every sendMessage call, filling the
// store with duplicates. The path is now a parameter with the original
// value as its backward-compatible default.
async function readyStorage(
  filePath = "C:/Users/AO237/Desktop/search/search.docx"
) {
  // Idempotence guard — the flag is set only after a successful run,
  // so a failed load can be retried.
  if (readyStorage.loaded) return;

  const loader = new DocxLoader(path.resolve(filePath));
  const fileContent = await loader.load();
  const splitterValue = await textSplitter.splitDocuments(fileContent);

  // Persist the chunks into the in-memory vector store.
  await memoStorage.addDocuments(splitterValue);
  readyStorage.loaded = true;
}

/**
 * Answer a user question via the RAG graph.
 *
 * Streams graph state, logging the latest message at each step, and now
 * also returns the final message (backward-compatible: the original
 * returned undefined, so existing callers are unaffected).
 *
 * @param {string} message - The user's question.
 * @returns {Promise<unknown>} The last message produced by the graph.
 */
export const sendMessage = async (message) => {
  console.log("message is:", message);
  await readyStorage();

  const inputs = {
    messages: [{ role: "user", content: message }],
  };

  let lastMessage;
  for await (const step of await graph.stream(inputs, {
    streamMode: "values",
  })) {
    lastMessage = step.messages[step.messages.length - 1];
    console.log(lastMessage);
  }
  return lastMessage;
};
