package agent.configuration.rag;

import agent.configuration.ModelList;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.embedding.bge.small.en.v15.BgeSmallEnV15QuantizedEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.rag.DefaultRetrievalAugmentor;
import dev.langchain4j.rag.RetrievalAugmentor;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.rag.query.transformer.CompressingQueryTransformer;
import dev.langchain4j.rag.query.transformer.QueryTransformer;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.Result;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;


import java.util.List;

import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;

@EnableConfigurationProperties(RagConfig.class)
@ConfigurationProperties(prefix = RagConfig.PREFIX)
public class RagConfig {

    public static final String PREFIX = "rag";

    // OpenAI API key, bound from property "rag.open-ai-key" (relaxed binding).
    private String openAiKey;

    // Base URL of the OpenAI-compatible endpoint, e.g. "rag.base-url" — TODO confirm
    // the exact relaxed-binding name Spring derives from "baseURL".
    private String baseURL;

    // Getters/setters are required for @ConfigurationProperties binding;
    // without them Spring silently leaves the fields null.
    public String getOpenAiKey() {
        return openAiKey;
    }

    public void setOpenAiKey(String openAiKey) {
        this.openAiKey = openAiKey;
    }

    public String getBaseURL() {
        return baseURL;
    }

    public void setBaseURL(String baseURL) {
        this.baseURL = baseURL;
    }

    /**
     * Naive-RAG demo: loads every document under a fixed directory and wires an
     * {@link Assistant} whose answers are augmented with content retrieved from
     * those documents.
     */
    public void text() {
        List<Document> documents = FileSystemDocumentLoader.loadDocuments("/home/langchain4j/documentation");

        Assistant assistant = AiServices.builder(Assistant.class)
                .chatLanguageModel(OpenAiChatModel.builder().baseUrl(baseURL).apiKey(openAiKey).build())
                .chatMemory(MessageWindowChatMemory.withMaxMessages(10))
                .contentRetriever(createContentRetriever(documents))
                .build();
        // NOTE(review): the assistant is built but never used or returned — confirm intent.
    }

    /**
     * Splits the given documents into segments, embeds them into an in-memory
     * store, and returns a retriever over that store.
     *
     * @param documents documents to ingest
     * @return a retriever backed by the freshly populated embedding store
     */
    private ContentRetriever createContentRetriever(List<Document> documents) {
        EmbeddingModel embeddingModel = new BgeSmallEnV15QuantizedEmbeddingModel();
        EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();

        EmbeddingStoreIngestor.builder()
                .documentSplitter(DocumentSplitters.recursive(300, 0))
                .embeddingModel(embeddingModel)
                .embeddingStore(embeddingStore)
                .build()
                .ingest(documents);

        // Previously this method returned null, which caused an NPE as soon as
        // AiServices used the retriever.
        return EmbeddingStoreContentRetriever.builder()
                .embeddingStore(embeddingStore)
                .embeddingModel(embeddingModel)
                .maxResults(2)
                .minScore(0.6)
                .build();
    }

    /**
     * Demo driver for the "query compression" assistant.
     * <p>
     * Ask "What is the legacy of John Doe?" and then "When was he born?";
     * the logs show the first query passed through uncompressed (no preceding
     * context) while the second is compressed into e.g. "When was John Doe born?".
     * <p>
     * NOTE(review): this is an instance method, not a {@code static} entry
     * point, so the JVM will not launch it directly — confirm it is invoked
     * from elsewhere.
     */
    public void main(String[] args) {

        Assistant assistant = createAssistant("documents/biography-of-john-doe.txt");
        // NOTE(review): the assistant is built but never used — confirm intent.
    }

    /**
     * Builds an advanced-RAG assistant that applies "query compression".
     * <p>
     * Advanced RAG in LangChain4j: https://github.com/langchain4j/langchain4j/pull/538
     * <p>
     * A follow-up question such as "When was he born?" lacks the entity name
     * and would retrieve poorly on its own. A {@link CompressingQueryTransformer}
     * asks the LLM to fold the query and the preceding conversation into one
     * self-contained query (e.g. "When was John Doe born?") before retrieval.
     * This adds a little latency and cost but markedly improves retrieval
     * quality; a smaller, cheaper model could be used for the compression step.
     *
     * @param documentPath path of the text document to ingest
     * @return an assistant whose answers are augmented with retrieved content
     */
    private Assistant createAssistant(String documentPath) {

        Document document = loadDocument(documentPath, new TextDocumentParser());

        // Split the document, embed each segment, and store the vectors in memory.
        EmbeddingModel embeddingModel = new BgeSmallEnV15QuantizedEmbeddingModel();
        EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();

        EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
                .documentSplitter(DocumentSplitters.recursive(300, 0))
                .embeddingModel(embeddingModel)
                .embeddingStore(embeddingStore)
                .build();
        ingestor.ingest(document);

        ChatLanguageModel chatModel = OpenAiChatModel.builder()
                .apiKey(openAiKey)
                .modelName(ModelList.ChooseModel(1))
                .build();

        // Retrieve at most 2 segments, keeping only those scoring >= 0.6.
        ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
                .embeddingStore(embeddingStore)
                .embeddingModel(embeddingModel)
                .maxResults(2)
                .minScore(0.6)
                .build();

        // Compress the user's query plus the preceding conversation into a
        // single stand-alone query before retrieval (see class Javadoc above).
        QueryTransformer queryTransformer = new CompressingQueryTransformer(chatModel);

        // The RetrievalAugmentor is the entry point into the RAG flow; further
        // customizations (e.g. a re-ranking ContentAggregator, a custom
        // Executor) can be plugged in here.
        RetrievalAugmentor retrievalAugmentor = DefaultRetrievalAugmentor.builder()
                .queryTransformer(queryTransformer)
                .contentRetriever(contentRetriever)
                .build();

        return AiServices.builder(Assistant.class)
                .chatLanguageModel(chatModel)
                .retrievalAugmentor(retrievalAugmentor)
                .chatMemory(MessageWindowChatMemory.withMaxMessages(10))
                .build();
    }

    /**
     * The "AI Service": a plain Java interface with AI capabilities.
     * LangChain4j supplies the implementation via proxy and reflection, hiding
     * the wiring of models, messages, memory, RAG components, tools and output
     * parsers — conceptually similar to Spring Data JPA or Retrofit.
     * <p>
     * The return type is wrapped in {@link Result} so callers can also access
     * the sources (the retrieved Content) that augmented the message.
     * <br>
     * More info here: https://docs.langchain4j.dev/tutorials/ai-services
     */
    public interface Assistant {

        Result<String> answer(String query);
    }

}
