import { v4 as uuidv4 } from "uuid";
import { Ollama } from "@langchain/ollama";
import { PromptTemplate } from "@langchain/core/prompts";
import {
  MessagesAnnotation,
  StateGraph,
  MemorySaver,
} from "@langchain/langgraph";
import {
  HumanMessage,
  AIMessage,
  trimMessages,
} from "@langchain/core/messages";

/**
 * Thin conversational wrapper around a local Ollama model, using a
 * single-node LangGraph with in-memory checkpointing so multi-turn history
 * is kept per thread id.
 */
class OllamaModel {
  app; // compiled LangGraph application (with MemorySaver checkpointer)
  model; // Ollama client
  threadId; // default conversation thread id
  trimmer; // pre-configured history trimmer runnable

  constructor() {
    this.model = new Ollama({
      baseUrl: process.env.MODEL_URL,
      model: "deepseek-r1:7b",
    });

    // tokenCounter receives the *array* of messages. The previous
    // implementation returned `messages.length / 4` (message count, not
    // tokens), so the 4000-token cap effectively never triggered.
    // Approximate tokens as total content characters / 4.
    this.trimmer = trimMessages({
      maxTokens: 4000,
      strategy: "last",
      includeSystem: true,
      allowPartial: true,
      tokenCounter: (messages) =>
        messages.reduce(
          (total, m) => total + String(m.content).length / 4,
          0
        ),
    });

    const workflow = new StateGraph(MessagesAnnotation)
      .addNode("model", this.llmCb.bind(this))
      .addEdge("__start__", "model")
      .addEdge("model", "__end__");

    this.app = workflow.compile({ checkpointer: new MemorySaver() });
    this.threadId = uuidv4();
  }

  /**
   * Graph node: trims the checkpointed history, streams the model reply,
   * and forwards the growing transcript to the caller's stream callback.
   *
   * @param {{messages: import("@langchain/core/messages").BaseMessage[]}} state
   * @param {object} config - per-invocation config; reads
   *   `config.configurable.streamCb` (set by sendMessage).
   * @returns {Promise<{messages: AIMessage[]}>} only the NEW message —
   *   MessagesAnnotation's reducer appends it to existing state. Returning
   *   `[...state.messages, newMsg]` (as before) duplicated the whole
   *   history on every turn.
   */
  async llmCb(state, config) {
    try {
      const messages = await this.trimmer.invoke(state.messages);
      const stream = await this.model.stream(messages);

      // The callback travels via `configurable`: a bare function placed in
      // config.callbacks is not a BaseCallbackHandler, so it never reached
      // this node reliably through `config.callbacks.handlers`.
      const streamCb = config?.configurable?.streamCb;

      let fullText = "";
      for await (const chunk of stream) {
        fullText += chunk;
        streamCb?.(fullText); // cumulative text, matching the original contract
      }
      return { messages: [new AIMessage(fullText)] };
    } catch (error) {
      console.error("ai receive value is fail...", error);
      return { messages: [] }; // best-effort: append nothing on failure
    }
  }

  /**
   * Normalizes raw question spec(s) into LangChain message objects.
   *
   * @param {object|object[]} messages - each item:
   *   `{ role = "user", message, value = {} }`. When `value` has keys,
   *   `message` is treated as a PromptTemplate and interpolated with it.
   * @returns {Promise<(HumanMessage|AIMessage)[]>} "user" → HumanMessage,
   *   any other role → AIMessage.
   */
  async formatQuestion(messages) {
    const items = Array.isArray(messages) ? messages : [messages];
    const messageList = [];
    for (const { role = "user", message, value = {} } of items) {
      let content = message;
      if (Object.keys(value).length) {
        const prompt = PromptTemplate.fromTemplate(message);
        content = await prompt.format(value);
      }
      messageList.push(
        role === "user" ? new HumanMessage(content) : new AIMessage(content)
      );
    }
    return messageList;
  }

  /**
   * Sends one or more messages through the graph on the given thread.
   *
   * @param {object|object[]} value - question spec(s); see formatQuestion.
   * @param {string} [threadId=this.threadId] - conversation/checkpoint id.
   * @param {(partial: string) => void} [cb] - optional streaming callback;
   *   invoked with the cumulative reply text as chunks arrive.
   * @returns {Promise<object>} final graph state (includes full history).
   * @throws {Error} wraps any failure, preserving the original via `cause`.
   */
  async sendMessage(value, threadId = this.threadId, cb) {
    try {
      const question = await this.formatQuestion(value);
      return await this.app.invoke(
        { messages: question },
        {
          configurable: {
            thread_id: threadId,
            // Delivered to llmCb via configurable (see note there).
            streamCb: typeof cb === "function" ? cb : undefined,
          },
        }
      );
    } catch (error) {
      throw new Error(`Message processing failed: ${error.message}`, {
        cause: error,
      });
    }
  }
}

// Module-level singleton: the Ollama client and graph are constructed once,
// at first import (process.env.MODEL_URL is read at that moment, so it must
// be set before this module is imported).
export default new OllamaModel();
