package com.yc;


import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiChatModelName;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.openai.OpenAiTokenCountEstimator;
import io.reactivex.Completable;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

/**
 * Demonstrates multi-turn conversation memory with a streaming chat model:
 * a {@link TokenWindowChatMemory} keeps recent messages within a 1000-token
 * budget, and each assistant reply is streamed token-by-token to stdout.
 */
public class _08_Memory {

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        String apiKey = System.getenv("DEEPSEEK_API_KEY");
        if (apiKey == null || apiKey.isBlank()) {
            // Fail fast with a clear message instead of an opaque auth error later.
            throw new IllegalStateException("Environment variable DEEPSEEK_API_KEY is not set");
        }

        OpenAiStreamingChatModel model = OpenAiStreamingChatModel.builder()
                .apiKey(apiKey)
                .modelName("deepseek-chat")
                .baseUrl("https://api.deepseek.com")
                .logRequests(true)
                .logResponses(true)
                .build();

        // Token-window memory: evicts the oldest messages once the estimated
        // token count exceeds 1000. NOTE(review): the GPT-4o-mini tokenizer is
        // used as an approximation for DeepSeek token counts — confirm this is
        // close enough for the chosen window size.
        ChatMemory chatMemory = TokenWindowChatMemory.withMaxTokens(
                1000, new OpenAiTokenCountEstimator(OpenAiChatModelName.GPT_4_O_MINI_2024_07_18));

        // System prompt
        chatMemory.add(SystemMessage.from("You are a helpful assistant."));

        // First user turn
        UserMessage userMessage = UserMessage.from("你好,最多3到5行描述一下java");
        chatMemory.add(userMessage);
        System.out.println("用户消息:" + userMessage.singleText());

        AiMessage aiMessage1 = streamChat(model, chatMemory);
        // Feed the assistant reply back into memory so the next turn has context.
        chatMemory.add(aiMessage1);

        // Second user turn — echoed to stdout like the first for consistency.
        UserMessage userMessage2 = UserMessage.from("你好,最多3到5行描述一下python");
        chatMemory.add(userMessage2);
        System.out.println("用户消息:" + userMessage2.singleText());

        AiMessage aiMessage2 = streamChat(model, chatMemory);
        chatMemory.add(aiMessage2);
    }

    /**
     * Sends the current memory contents to the streaming model, printing partial
     * tokens as they arrive, and blocks until the complete response is available.
     *
     * @param model      the streaming chat model to call
     * @param chatMemory the conversation history to send
     * @return the complete assistant message
     * @throws ExecutionException   if the model invocation fails
     * @throws InterruptedException if the calling thread is interrupted while waiting
     */
    private static AiMessage streamChat(OpenAiStreamingChatModel model, ChatMemory chatMemory)
            throws ExecutionException, InterruptedException {
        CompletableFuture<AiMessage> future = new CompletableFuture<>();
        model.chat(chatMemory.messages(), new StreamingChatResponseHandler() {
            @Override
            public void onPartialResponse(String token) {
                // print, not println: tokens are fragments of one continuous reply,
                // so inserting a newline after each would mangle the output.
                System.out.print(token);
            }

            @Override
            public void onCompleteResponse(ChatResponse chatResponse) {
                System.out.println(); // terminate the streamed line
                future.complete(chatResponse.aiMessage());
            }

            @Override
            public void onError(Throwable throwable) {
                future.completeExceptionally(throwable);
            }
        });
        // Blocking get is acceptable here: this demo is strictly sequential.
        return future.get();
    }
}
