import {Annotation, END, MemorySaver, START, StateGraph} from "@langchain/langgraph";
import {AIMessage, BaseMessage} from "@langchain/core/messages";
import {z} from 'zod'
import {tool} from "@langchain/core/tools";
import {ToolNode} from "@langchain/langgraph/prebuilt";
import {qwen} from "../models";
import {RunnableConfig} from "@langchain/core/runnables";
import {saveImages} from "../tool";


/**
 * Persistence via a checkpointer: combined with a thread_id, it lets
 * different conversation threads keep separate, independent histories.
 */

/**
 * Conversation state: the accumulated message history.
 * Persisted per thread by the checkpointer, so distinct thread_ids
 * keep distinct histories.
 */
const GraphState = Annotation.Root({
    messages: Annotation<BaseMessage[]>({
        // Append new messages instead of replacing the list.
        reducer: (x, y) => x.concat(y),
        // Start from an empty history so the reducer's left-hand side
        // is never undefined on the first update.
        default: () => [],
    }),
})

/**
 * Stub "search" tool: ignores the query and always reports cold weather.
 * Stands in for a real web-search / weather API in this demo.
 */
const searchTool = tool(
    async (_input: { query: string }) => "Cold, with a low of 13 ℃",
    {
        name: 'search',
        description: "Use to surf the web, fetch current information, check the weather, and retrieve other information.",
        schema: z.object({
            query: z.string().describe("The query to use in your search."),
        }),
    },
)

// Smoke-test the stub tool before wiring it into the graph.
console.log(await searchTool.invoke({ query: "What's the weather like?"}))

const tools = [searchTool]

// ToolNode takes the LIST of tools (it looks up the requested tool by
// name at runtime) — passing a single tool instance is a bug.
const toolNode = new ToolNode(tools);

// Let the model emit tool calls for any tool in the list.
const boundModel = qwen.bindTools(tools)

/**
 * Conditional edge: route to the "tools" node when the model's latest
 * message requested tool calls; otherwise end the run.
 */
const routeMessage = (state: typeof GraphState.State) => {
    const last = state.messages[state.messages.length - 1] as AIMessage;
    return last.tool_calls?.length ? 'tools' : END;
}

/**
 * Agent node: run the tool-bound model over the accumulated history.
 * The returned AI message is merged into state by the messages reducer.
 */
const callModel = async (state: typeof GraphState.State, config?: RunnableConfig) => {
    const response = await boundModel.invoke(state.messages, config);
    return { messages: [response] };
}

// Agent loop: START -> agent -> (tools -> agent)* -> END.
const workflow = new StateGraph(GraphState)
    .addNode("agent", callModel)
    .addNode("tools", toolNode)
    .addEdge(START, "agent")
    .addConditionalEdges('agent', routeMessage)
    .addEdge("tools", "agent");

// const graph = workflow.compile();

// Compile with an in-memory checkpointer: state is snapshotted after every
// step, keyed by the thread_id supplied in the run config.
const memory = new MemorySaver();
const persistentGraph = workflow.compile({
    checkpointer: memory,
});

// await saveImages(graph, '持久化.png')

let inputs = {
    messages: [
        {
            role: 'user',
            content: "Hi I'm Yu, nice to meet you."
        }
    ]
}
let config = {
    configurable: {
        thread_id: "conversation-num-1"
    }
}
for await (
    const { messages } of await persistentGraph.stream(inputs, {
    ...config,
    streamMode: 'values',
})
) {
    let msg = messages[messages?.length - 1];
    if(msg?.content) {
        console.log(msg.content)
    } else if(msg?.tool_calls?.length > 0) {
        console.log(msg.tool_calls);
    } else {
        console.log(msg);
    }

    console.log('-------\n')
}

inputs = { messages: [{ role: "user", content: "what's my name?" }] };
for await (
    const { messages } of await persistentGraph.stream(inputs, {
    ...config,
    streamMode: "values",
})
    ) {
    let msg = messages[messages?.length - 1];
    if (msg?.content) {
        console.log(msg.content);
    } else if (msg?.tool_calls?.length > 0) {
        console.log(msg.tool_calls);
    } else {
        console.log(msg);
    }
    console.log("-----\n");
}

config = { configurable: { thread_id: "conversation-2" } };
inputs = { messages: [{ role: "user", content: "what's my name??" }] };
for await (
    const { messages } of await persistentGraph.stream(inputs, {
    ...config,
    streamMode: "values",
})
    ) {
    let msg = messages[messages?.length - 1];
    if (msg?.content) {
        console.log(msg.content);
    } else if (msg?.tool_calls?.length > 0) {
        console.log(msg.tool_calls);
    } else {
        console.log(msg);
    }
    console.log("-----\n");
}