package org.wenshu.ai.example;

import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

import static dev.langchain4j.model.openai.OpenAiChatModelName.GPT_4_O_MINI;

/**
 * Example: streaming a chat completion token-by-token.
 *
 * <p>Sends one prompt to an OpenAI-compatible endpoint and prints each partial
 * response chunk as it arrives. A {@link CompletableFuture} bridges the
 * asynchronous {@link StreamingChatResponseHandler} callbacks back to the main
 * thread so the JVM does not exit before streaming finishes.
 */
public class _04_Streaming {

    // Prefer the environment variable so the secret never lives in source control;
    // the original placeholder keeps the example compiling/runnable for readers.
    private static final String API_KEY =
            System.getenv().getOrDefault("XFYUN_API_KEY", "your-key");

    public static void main(String[] args) {

        OpenAiStreamingChatModel model = OpenAiStreamingChatModel.builder()
                .baseUrl("https://maas-api.cn-huabei-1.xf-yun.com/v1")
                .apiKey(API_KEY)
                .modelName("xdeepseekv3")
                .build();

        String prompt = "Write a short funny poem about developers and null-pointers, 10 lines maximum";

        System.out.println("Nr of chars: " + prompt.length());
        // NOTE(review): estimateTokenCount was dropped from streaming models in some
        // langchain4j releases — confirm it exists on the version this project pins.
        System.out.println("Nr of tokens: " + model.estimateTokenCount(prompt));

        // Completed (or failed) by the handler below; main blocks on it at the end.
        CompletableFuture<ChatResponse> futureResponse = new CompletableFuture<>();

        StreamingChatResponseHandler handler = new StreamingChatResponseHandler() {

            @Override
            public void onPartialResponse(String partialResponse) {
                // Print each chunk as soon as it arrives, without a trailing newline.
                System.out.print(partialResponse);
            }

            @Override
            public void onCompleteResponse(ChatResponse completeResponse) {
                System.out.println("\n\nDone streaming");
                futureResponse.complete(completeResponse);
            }

            @Override
            public void onError(Throwable error) {
                System.out.println("Something went wrong: " + error.getMessage());
                futureResponse.completeExceptionally(error);
            }
        };

        model.chat(prompt, handler);

        // Block until streaming finishes. join() wraps failures in CompletionException;
        // catch it so a failed stream reports cleanly instead of crashing main, and
        // so the final "end" marker is always printed.
        try {
            futureResponse.join();
        } catch (CompletionException e) {
            System.out.println("Streaming failed: " + e.getCause());
        }

        System.out.println("end");

    }
}
