package com.example.studyllm.component.model;

import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.ollama.OllamaStreamingChatModel;
import io.github.czelabueno.jai.workflow.StateWorkflow;
import io.github.czelabueno.jai.workflow.WorkflowStateName;
import io.github.czelabueno.jai.workflow.langchain4j.internal.DefaultJAiWorkflow;
import io.github.czelabueno.jai.workflow.langchain4j.node.StreamingNode;
import io.github.czelabueno.jai.workflow.node.Conditional;
import io.github.czelabueno.jai.workflow.node.Node;
import reactor.core.publisher.Flux;

import java.util.Arrays;

/**
 * Demo entry point: builds a three-node jai-workflow graph
 * (retrieve -> web search -> streaming answer generation) backed by an
 * Ollama streaming chat model, then runs one question through it in
 * streaming mode, printing tokens as they arrive.
 */
public class Example {

    public static void main(String[] args) {

        MyStatefulBean myStatefulBean = new MyStatefulBean();
        String[] documents = new String[]{
                "https://lilianweng.github.io/posts/2023-06-23-agent/",
                "https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/"
        };

        StreamingChatLanguageModel model = buildStreamModel();

        // Create the nodes and associate them with the functions to be used during execution.
        Node<MyStatefulBean, MyStatefulBean> retrieveNode = Node.from(
                "Retrieve Node",
                obj -> MyStatefulBeanFunctions.extractRelevantDocuments(obj, documents));
        Node<MyStatefulBean, MyStatefulBean> webSearchNode = Node.from(
                "Web Searching Node",
                obj -> MyStatefulBeanFunctions.searchWeb(obj));
        StreamingNode<MyStatefulBean> generateAnswerNode = StreamingNode.from(
                "Generation Node",
                obj -> MyStatefulBeanFunctions.generateUserMessageUsingPrompt(obj),
                model);

        // Create the workflow in streaming mode.
        DefaultJAiWorkflow<MyStatefulBean> workflow = DefaultJAiWorkflow.<MyStatefulBean>builder()
                .nodes(Arrays.asList(retrieveNode, webSearchNode, generateAnswerNode))
                .statefulBean(myStatefulBean)
                .runStream(true)
                .build();

        // Wire the graph: retrieve -> webSearch -> generateAnswer -> END.
        StateWorkflow stateWorkflow = workflow.workflow();
        stateWorkflow.startNode(retrieveNode);
        stateWorkflow.putEdge(retrieveNode, webSearchNode);
        // FIX: the original registered BOTH a Conditional edge (whose lambda
        // unconditionally returned generateAnswerNode — dead branching logic)
        // AND a direct edge from webSearchNode to generateAnswerNode,
        // producing duplicate edges in the graph. A single direct edge is the
        // equivalent, unambiguous wiring.
        stateWorkflow.putEdge(webSearchNode, generateAnswerNode);
        stateWorkflow.putEdge(generateAnswerNode, WorkflowStateName.END);

        // Start conversation with the workflow in streaming mode.
        String question = "Summarizes the importance of building agents with LLMs";
        Flux<String> tokens = workflow.answerStream(question);
        tokens.subscribe(System.out::println);
    }

    /**
     * Builds the default Ollama streaming model used by this demo.
     * Defaults preserve the original hard-coded endpoint, model name and
     * temperature for backward compatibility.
     */
    private static StreamingChatLanguageModel buildStreamModel() {
        return buildStreamModel("http://47.109.192.172:11434", "qwen2:7b", 0.1);
    }

    /**
     * Builds an Ollama streaming chat model for the given endpoint.
     *
     * @param baseUrl     base URL of the Ollama server
     * @param modelName   name of the model to run (e.g. "qwen2:7b")
     * @param temperature sampling temperature; lower values give more
     *                    deterministic output
     * @return a configured {@link StreamingChatLanguageModel}
     */
    private static StreamingChatLanguageModel buildStreamModel(
            String baseUrl, String modelName, double temperature) {
        return OllamaStreamingChatModel.builder()
                .baseUrl(baseUrl)
                .modelName(modelName)
                .temperature(temperature)
                .build();
    }

}