import { Ollama } from "@langchain/ollama";
import { StateGraph } from "@langchain/langgraph";
import EmbeddingsModal from "./embending.js";

class RagModal extends EmbeddingsModal {
  /** Ollama LLM used to generate the final answer. */
  model;
  /** Compiled LangGraph pipeline: retrieve -> generate. */
  graph;

  constructor() {
    super();

    this.model = new Ollama({
      baseUrl: process.env.MODEL_URL,
      model: "deepseek-r1:7b",
    });

    // Two-node RAG pipeline: vector retrieval feeds the generation prompt.
    // Handlers are bound so they keep `this` when LangGraph invokes them.
    this.graph = new StateGraph(this.annotation)
      .addNode("retrieve", this.searchContext.bind(this))
      .addNode("generate", this.generate.bind(this))
      .addEdge("__start__", "retrieve")
      .addEdge("retrieve", "generate")
      .addEdge("generate", "__end__")
      .compile({});
  }

  /**
   * Generation node: builds a prompt from the retrieved documents and
   * streams the model's answer, accumulating it into a single string.
   *
   * @param {{ question: string, context: Array<{ pageContent: string }> }} state
   *   Graph state produced by the retrieve node.
   * @returns {Promise<{ answer: string }>} the fully accumulated model output
   */
  async generate(state) {
    console.log("generate start...");
    const docsContent = state.context.map((doc) => doc.pageContent).join("\n");

    const programTemplate = await this.getRagProgramTemplate();
    const message = await programTemplate.invoke({
      question: state.question, // the user's question
      context: docsContent, // vector-search matches
    });

    const stream = await this.model.stream(message);

    let fullChunk = "";
    for await (const chunk of stream) {
      // Log only the incremental chunk. The original logged the whole
      // accumulated string on every iteration, re-printing the growing
      // answer each time (quadratic console output).
      console.log(chunk);
      fullChunk += chunk;
    }
    // Report completion only after the stream is fully consumed; the
    // original logged "end" before the loop, which was misleading.
    console.log("generate end...");

    return { answer: fullChunk };
  }

  /**
   * Runs the full RAG pipeline for a user question.
   *
   * @param {string} value - the user's question
   * @returns {Promise<object>} final graph state (includes `answer`)
   */
  async sendMessage(value) {
    // Prepare the RAG data (vector store / retriever) before invoking.
    await this.startRagSearch();
    const result = await this.graph.invoke({ question: value });
    return result;
  }
}

// Shared singleton — every importer receives the same RagModal instance.
export default new RagModal();
