package com.xp.ai.chat.ollama;

import com.xp.ai.util.ModelUtils;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.output.Response;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

/**
 * Streaming chat example — useful when building a chatbot that renders
 * tokens to the user as they arrive instead of waiting for the full answer.
 */
public class SimpleStreamChat {
    public static void main(String[] args) {
        String msg = "中国的一线城市有哪些？";
        // The streaming API differs from the blocking one:
        // blocking chat uses ChatLanguageModel, streaming uses StreamingChatLanguageModel.
        StreamingChatLanguageModel model = ModelUtils.getOllamaStreamModel();
        // Completed (or failed) by the handler callbacks below; lets main
        // block until the streamed response has fully arrived.
        CompletableFuture<ChatResponse> responseFuture = new CompletableFuture<>();
        model.chat(msg, new StreamingChatResponseHandler() {

            /**
             * Prints each streamed token as it arrives.
             * NOTE: this callback is currently marked experimental upstream.
             *
             * @param partialResponse The partial response (usually a single token), which is a part of the complete response.
             */
            @Override
            public void onPartialResponse(String partialResponse) {
                System.out.println(partialResponse);
                try {
                    // Artificial delay so the streaming effect is visible in the console.
                    TimeUnit.MILLISECONDS.sleep(500L);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag before rethrowing so the thread
                    // running this callback can still observe the interruption.
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
            }

            /**
             * Invoked once the full response has been assembled; unblocks main.
             */
            @Override
            public void onCompleteResponse(ChatResponse completeResponse) {
                responseFuture.complete(completeResponse);
            }

            /**
             * Propagates streaming failures so the {@code join()} below rethrows them.
             */
            @Override
            public void onError(Throwable error) {
                responseFuture.completeExceptionally(error);
            }
        });

        // Block until the stream completes or fails.
        responseFuture.join();
    }
}
