package com.langChain4j.model.config;

import com.langChain4j.api.constants.ModelHostConstant;
import com.langChain4j.api.enums.ModelEnum;
import com.langChain4j.model.Assistant;
import com.langChain4j.model.StreamAssistant;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.ChatMemoryProvider;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.StreamingChatModel;
import dev.langchain4j.model.embedding.onnx.bgesmallenv15q.BgeSmallEnV15QuantizedEmbeddingModel;
import dev.langchain4j.model.ollama.OllamaChatModel;
import dev.langchain4j.model.ollama.OllamaStreamingChatModel;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.memory.chat.ChatMemoryStore;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * Spring configuration wiring LangChain4j Ollama chat models (blocking and
 * streaming), a RAG content retriever, and the assistant service beans.
 *
 * @author linjun
 * @date 2025/10/11 09:41
 * @description Ollama chat model client and RAG assistant configuration
 */
@Configuration
public class ChatModelClientConfig {

    /** Maximum number of messages kept per conversation in the memory window. */
    private static final int MAX_MEMORY_MESSAGES = 10;

    /**
     * Blocking Ollama chat model.
     *
     * <p>TODO: externalize base URL, model name and temperature into
     * configuration properties instead of hard-coding them here.
     *
     * @return the configured blocking chat model
     */
    @Bean
    public OllamaChatModel ollamaChatModel() {
        return OllamaChatModel.builder()
                .baseUrl(ModelHostConstant.OLLAMA_API_URL)
                .modelName(ModelEnum.MODEL_OLLAMA_DEFAULT.getCode())
                .temperature(1.0)
                .build();
    }

    /**
     * Streaming Ollama chat model, configured identically to
     * {@link #ollamaChatModel()} but emitting tokens incrementally.
     *
     * <p>TODO: externalize base URL, model name and temperature into
     * configuration properties instead of hard-coding them here.
     *
     * @return the configured streaming chat model
     */
    @Bean
    public StreamingChatModel streamingChatModel() {
        return OllamaStreamingChatModel.builder()
                .baseUrl(ModelHostConstant.OLLAMA_API_URL)
                .modelName(ModelEnum.MODEL_OLLAMA_DEFAULT.getCode())
                .temperature(1.0)
                .build();
    }

    /**
     * RAG content retriever over the shared embedding store.
     *
     * <p>NOTE(review): the embedding model instantiated here must be the same
     * model used when documents were ingested into {@code embeddingStore},
     * otherwise similarity scores are meaningless — confirm against the
     * ingestion pipeline.
     *
     * @param embeddingStore the store holding embedded text segments
     * @return a retriever returning at most 2 segments with score >= 0.5
     */
    @Bean
    public ContentRetriever contentRetriever(EmbeddingStore<TextSegment> embeddingStore) {
        return EmbeddingStoreContentRetriever.builder()
                .embeddingStore(embeddingStore)
                .embeddingModel(new BgeSmallEnV15QuantizedEmbeddingModel())
                .maxResults(2)
                .minScore(0.5)
                .build();
    }

    /**
     * Blocking RAG assistant backed by {@link #ollamaChatModel()}.
     *
     * @param chatMemoryStore  persistent store for conversation memory
     * @param ollamaChatModel  the blocking chat model bean
     * @param contentRetriever the RAG content retriever bean
     * @return the assembled assistant proxy
     */
    @Bean
    public Assistant chatModelClient(ChatMemoryStore chatMemoryStore, OllamaChatModel ollamaChatModel, ContentRetriever contentRetriever) {
        return AiServices.builder(Assistant.class)
                .chatModel(ollamaChatModel)
                .chatMemoryProvider(chatMemoryProvider(chatMemoryStore))
                .contentRetriever(contentRetriever)
                .build();
    }

    /**
     * Streaming RAG assistant backed by {@link #streamingChatModel()}.
     *
     * @param chatMemoryStore    persistent store for conversation memory
     * @param streamingChatModel the streaming chat model bean
     * @param contentRetriever   the RAG content retriever bean
     * @return the assembled streaming assistant proxy
     */
    @Bean
    public StreamAssistant streamChatModelClient(ChatMemoryStore chatMemoryStore, StreamingChatModel streamingChatModel, ContentRetriever contentRetriever) {
        return AiServices.builder(StreamAssistant.class)
                .streamingChatModel(streamingChatModel)
                .chatMemoryProvider(chatMemoryProvider(chatMemoryStore))
                .contentRetriever(contentRetriever)
                .build();
    }

    /**
     * Builds the per-conversation memory provider shared by both assistants:
     * a sliding window of at most {@link #MAX_MEMORY_MESSAGES} messages,
     * keyed by memory id and persisted in the given store.
     *
     * @param chatMemoryStore persistent backing store for conversation history
     * @return a provider creating one windowed memory per conversation id
     */
    private ChatMemoryProvider chatMemoryProvider(ChatMemoryStore chatMemoryStore) {
        return memoryId -> MessageWindowChatMemory.builder()
                .id(memoryId)
                .maxMessages(MAX_MEMORY_MESSAGES)
                .chatMemoryStore(chatMemoryStore)
                .build();
    }
}
