import { getPodTool } from "../../llm/tools/get-pod-tool";
import {
  getPodLogTool,
  getPodPreFailLogTool,
} from "../../llm/tools/get-pod-log-tool";
//import {searchTool} from "../../llm/tools/search/search-tool";
import { createReactAgent } from "@langchain/langgraph/prebuilt";
import { MemorySaver } from "@langchain/langgraph";
import { ChatOpenAI } from "@langchain/openai";
import { llmPreferenceStore } from "../../llm/preference/llm-preference-store";

/**
 * Builds a streaming LLM response for the given chat messages.
 *
 * When `llmPreferenceStore.useK8sTool` is enabled, a ReAct agent is
 * created with the pod-inspection tools and streamed in "messages" mode;
 * otherwise the bare chat model is streamed directly.
 *
 * @param messages - Chat input forwarded verbatim to the model/agent.
 * @returns The async stream produced by the agent or the bare model.
 */
export const getStream = async (
  // Derive the input type from the model's own stream() signature instead of
  // an implicit `any`. NOTE(review): the agent path wraps this in
  // `{ messages }` — confirm callers only pass message-list inputs.
  messages: Parameters<ChatOpenAI["stream"]>[0]
) => {
  // NOTE(review): a fresh MemorySaver per call means the hard-coded
  // thread_id below never resumes earlier state — confirm whether
  // cross-call persistence was intended before hoisting this.
  const checkpointer = new MemorySaver();

  // Generous request ceiling for slow/self-hosted endpoints (10 minutes).
  const TIMEOUT_MS = 600_000;

  const model = new ChatOpenAI({
    model: llmPreferenceStore.model,
    configuration: {
      baseURL: llmPreferenceStore.baseURL,
      apiKey: llmPreferenceStore.apiKey, // forwarded to the OpenAI SDK client
    },
    apiKey: llmPreferenceStore.apiKey, // also set at the LangChain level
    timeout: TIMEOUT_MS,
    verbose: true,
  });

  // Plain model stream when Kubernetes tooling is disabled.
  if (!llmPreferenceStore.useK8sTool) {
    return model.stream(messages);
  }

  const agent = createReactAgent({
    llm: model,
    tools: [
      getPodTool,
      getPodLogTool,
      getPodPreFailLogTool /*, searchTool */,
    ],
    checkpointSaver: checkpointer,
  });

  return agent.stream(
    { messages },
    { configurable: { thread_id: "42" }, streamMode: "messages" }
  );
};
