package com.atguigu.study.config;

import com.atguigu.study.service.ChatAssistant;
import com.atguigu.study.service.ChatMemoryAssistant;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.memory.chat.TokenWindowChatMemory;
import dev.langchain4j.model.TokenCountEstimator;
import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiTokenCountEstimator;
import dev.langchain4j.service.AiServices;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * LangChain4j chat-memory configuration.
 * Reference: https://docs.langchain4j.dev/tutorials/chat-memory
 *
 * @author Marco
 * @date 2025/8/20 16:36
 * @email 3293336923@qq.com
 */
@Configuration
public class LLMConfig {

    /** OpenAI-compatible endpoint for Alibaba DashScope. */
    private static final String DASHSCOPE_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1";

    /** qwen-long is suited to processing long text. */
    private static final String MODEL_NAME = "qwen-long";

    /** Maximum number of messages retained by the message-window chat memory. */
    private static final int MAX_WINDOW_MESSAGES = 100;

    /** Maximum number of tokens retained by the token-window chat memory. */
    private static final int MAX_WINDOW_TOKENS = 1000;

    /**
     * Chat model backed by DashScope's OpenAI-compatible API.
     * The API key is read from the {@code ALIQW} environment variable.
     *
     * @return the shared {@link ChatModel} bean
     */
    @Bean
    public ChatModel chatModel() {
        return OpenAiChatModel.builder()
                .apiKey(System.getenv("ALIQW"))
                .modelName(MODEL_NAME)
                .baseUrl(DASHSCOPE_BASE_URL)
                .build();
    }

    /**
     * Stateless assistant: no chat memory, so each call is independent.
     *
     * @param chatModel the underlying chat model
     * @return a memory-less {@link ChatAssistant}
     */
    @Bean(name = "chat")
    public ChatAssistant chatAssistant(ChatModel chatModel) {
        return AiServices.create(ChatAssistant.class, chatModel);
    }

    /**
     * Assistant with per-conversation message-window memory: each {@code memoryId}
     * gets its own {@link MessageWindowChatMemory} keeping at most
     * {@value #MAX_WINDOW_MESSAGES} of the most recent messages.
     *
     * @param chatModel the underlying chat model
     * @return a {@link ChatMemoryAssistant} with message-count-bounded memory
     */
    @Bean(name = "chatMessageWindowChatMemory")
    public ChatMemoryAssistant chatMessageWindowChatMemory(ChatModel chatModel) {
        return AiServices.builder(ChatMemoryAssistant.class)
                .chatModel(chatModel)
                // One memory instance per memoryId, capped at the most recent MAX_WINDOW_MESSAGES messages.
                .chatMemoryProvider(memoryId -> MessageWindowChatMemory.withMaxMessages(MAX_WINDOW_MESSAGES))
                .build();
    }

    /**
     * Assistant with per-conversation token-window memory capped at
     * {@value #MAX_WINDOW_TOKENS} tokens.
     * <p>
     * NOTE(review): the estimator uses GPT-4 tokenization rules while the chat
     * model is qwen-long, so token counts are only an approximation of what the
     * model actually consumes — confirm this margin of error is acceptable.
     *
     * @param chatModel the underlying chat model
     * @return a {@link ChatMemoryAssistant} with token-count-bounded memory
     */
    @Bean(name = "chatTokenWindowChatMemory")
    public ChatMemoryAssistant chatTokenWindowChatMemory(ChatModel chatModel) {
        TokenCountEstimator tokenCountEstimator = new OpenAiTokenCountEstimator("gpt-4");
        return AiServices.builder(ChatMemoryAssistant.class)
                .chatModel(chatModel)
                .chatMemoryProvider(memoryId ->
                        TokenWindowChatMemory.withMaxTokens(MAX_WINDOW_TOKENS, tokenCountEstimator))
                .build();
    }
}
