package com.it.config;

import com.it.listener.TestChatModelListener;
import com.it.service.ChatMemoryAssistant;
import dev.langchain4j.community.model.dashscope.WanxImageModel;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.chat.StreamingChatModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.qdrant.QdrantEmbeddingStore;
import io.qdrant.client.QdrantClient;
import io.qdrant.client.QdrantGrpcClient;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.time.Duration;
import java.util.List;

@Configuration
public class LLMconfig {

    /**
     * Streaming chat model for "deep thinking" conversations, backed by the
     * DeepSeek reasoning model through its OpenAI-compatible API.
     *
     * @return a streaming chat model using {@code deepseek-reasoner}
     */
    @Bean(name = "chatModelDeepSeek")
    public StreamingChatModel chatModelDeepSeek() {
        return OpenAiStreamingChatModel.builder()
                // NOTE(review): hyphens are unusual in environment variable names —
                // confirm the variable really is exported as "deepseek-api".
                .apiKey(System.getenv("deepseek-api"))
                .baseUrl("https://api.deepseek.com/")
                .modelName("deepseek-reasoner")
                .temperature(0.5)
                .listeners(List.of(new TestChatModelListener()))
                // 3000 seconds = 50 minutes. NOTE(review): confirm this was not
                // meant to be Duration.ofMillis(3000).
                .timeout(Duration.ofSeconds(3000))
                .logRequests(true)
                .logResponses(true)
                .build();
    }

    /**
     * Streaming chat model for long conversations, backed by Qwen's
     * {@code qwen-long} model via DashScope's OpenAI-compatible endpoint.
     *
     * @return a streaming chat model using {@code qwen-long}
     */
    @Bean(name = "chatModelQWenLong")
    public StreamingChatModel chatModelQWenLong() {
        return OpenAiStreamingChatModel.builder()
                .apiKey(System.getenv("QWen-api"))
                .baseUrl("https://dashscope.aliyuncs.com/compatible-mode/v1")
                .modelName("qwen-long")
                .temperature(0.7)
                // 3000 seconds = 50 minutes — see note on chatModelDeepSeek.
                .timeout(Duration.ofSeconds(3000))
                .logRequests(true)
                .logResponses(true)
                .build();
    }

    /**
     * Vision-language chat model (image-to-text), backed by Qwen's
     * {@code qwen-vl-max} multimodal model.
     *
     * @return a blocking chat model using {@code qwen-vl-max}
     */
    @Bean(name = "chatModelQWen")
    public ChatModel chatModelQWen() {
        return OpenAiChatModel.builder()
                .apiKey(System.getenv("QWen-api"))
                .baseUrl("https://dashscope.aliyuncs.com/compatible-mode/v1")
                .modelName("qwen-vl-max")
                .temperature(0.7)
                .timeout(Duration.ofSeconds(3000))
                .logRequests(true)
                .logResponses(true)
                .build();
    }

    /**
     * Text-to-image model backed by Tongyi Wanx ({@code wanx2.1-t2i-turbo}).
     *
     * @return an image-generation model
     */
    @Bean
    public WanxImageModel wanxImageModel() {
        return WanxImageModel.builder()
                .apiKey(System.getenv("QWen-api"))
                .modelName("wanx2.1-t2i-turbo")
                .build();
    }

    /**
     * Text embedding model ({@code text-embedding-v3}, 1024 dimensions) used
     * to vectorize documents and queries for RAG. The dimension must match the
     * Qdrant collection configured in {@link #embeddingStore()}.
     *
     * @return the embedding model
     */
    @Bean
    public EmbeddingModel embeddingModel() {
        return OpenAiEmbeddingModel.builder()
                .apiKey(System.getenv("QWen-api"))
                .modelName("text-embedding-v3")
                .dimensions(1024)
                .baseUrl("https://dashscope.aliyuncs.com/compatible-mode/v1")
                .build();
    }

    /**
     * Qdrant-backed embedding store over the {@code test-qdrant} collection.
     * NOTE(review): host/port are hard-coded; consider externalizing them to
     * application properties.
     *
     * @return the embedding store used for retrieval-augmented generation
     */
    @Bean
    public EmbeddingStore<TextSegment> embeddingStore() {
        return QdrantEmbeddingStore.builder()
                .host("127.0.0.1")
                .port(6334) // Qdrant gRPC port
                .collectionName("test-qdrant")
                .build();
    }

    /**
     * Low-level Qdrant gRPC client (e.g. for collection management in RAG
     * pipelines). The method name {@code adrantClient} is a typo for
     * "qdrantClient"; the bean is kept under the original name for backward
     * compatibility and additionally exposed under the corrected alias.
     *
     * @return a Qdrant client connected over plaintext gRPC to localhost
     */
    @Bean(name = {"adrantClient", "qdrantClient"})
    public QdrantClient adrantClient() {
        // Third argument `false` disables TLS — plain gRPC to the local instance.
        QdrantGrpcClient.Builder grpcBuilder = QdrantGrpcClient.newBuilder("127.0.0.1", 6334, false);
        return new QdrantClient(grpcBuilder.build());
    }

    /**
     * RAG-enabled assistant backed by the long-conversation Qwen model.
     * NOTE(review): two beans of type {@link ChatMemoryAssistant} exist in this
     * class; unqualified injection by type elsewhere will be ambiguous — inject
     * by bean name or add {@code @Qualifier}/{@code @Primary} as appropriate.
     *
     * @param chatModelQWenLong resolved by name to the qwen-long streaming model
     * @param embeddingModel    embedding model used by the content retriever
     * @param embeddingStore    Qdrant-backed vector store queried for context
     * @return the assembled assistant proxy
     */
    @Bean
    public ChatMemoryAssistant chatMemoryAssistant(
            StreamingChatModel chatModelQWenLong,
            EmbeddingModel embeddingModel,
            EmbeddingStore<TextSegment> embeddingStore) {
        return buildMemoryAssistant(chatModelQWenLong, embeddingModel, embeddingStore);
    }

    /**
     * RAG-enabled assistant backed by the DeepSeek reasoning model.
     * NOTE(review): method name should be lowerCamelCase, but renaming would
     * change the bean name ("DeepSeekMemoryAssistant"), so it is kept as-is.
     *
     * @param chatModelDeepSeek resolved by name to the DeepSeek streaming model
     * @param embeddingModel    embedding model used by the content retriever
     * @param embeddingStore    Qdrant-backed vector store queried for context
     * @return the assembled assistant proxy
     */
    @Bean
    public ChatMemoryAssistant DeepSeekMemoryAssistant(
            StreamingChatModel chatModelDeepSeek,
            EmbeddingModel embeddingModel,
            EmbeddingStore<TextSegment> embeddingStore) {
        return buildMemoryAssistant(chatModelDeepSeek, embeddingModel, embeddingStore);
    }

    /**
     * Shared wiring for the two assistant beans: streaming model + per-memoryId
     * sliding-window memory + embedding-store content retriever.
     */
    private ChatMemoryAssistant buildMemoryAssistant(
            StreamingChatModel model,
            EmbeddingModel embeddingModel,
            EmbeddingStore<TextSegment> embeddingStore) {
        return AiServices.builder(ChatMemoryAssistant.class)
                .streamingChatModel(model)
                // Each memoryId gets its own window of the last 100 messages.
                .chatMemoryProvider(memoryId -> MessageWindowChatMemory.withMaxMessages(100))
                // Builder form replaces the deprecated two-arg constructor.
                .contentRetriever(EmbeddingStoreContentRetriever.builder()
                        .embeddingStore(embeddingStore)
                        .embeddingModel(embeddingModel)
                        .build())
                .build();
    }
}
