package com.peng.config;

import dev.langchain4j.community.model.dashscope.QwenEmbeddingModel;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.parser.apache.pdfbox.ApachePdfBoxDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentByParagraphSplitter;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.onnx.HuggingFaceTokenizer;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import io.qdrant.client.QdrantClient;
import io.qdrant.client.grpc.Collections;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.Resource;
import org.springframework.context.annotation.Configuration;

import java.nio.file.FileSystems;
import java.nio.file.PathMatcher;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;

/**
 * Creates the Qdrant collection and ingests the local knowledge base on startup.
 *
 * @author love_ovo
 * @ClassName QdrantEmbeddingConfig.java
 * @createTime 2025-05-26 15:46:00
 */
@Configuration
public class QdrantEmbeddingConfig {

    /** Name of the Qdrant collection holding the knowledge-base embeddings. */
    private static final String COLLECTION_NAME = "xiaozhi-qdrant";
    /** Directory scanned for .txt / .pdf knowledge documents.
     *  NOTE(review): hard-coded absolute Windows path — consider externalizing
     *  to application properties. */
    private static final String KNOWLEDGE_DIR = "G:\\代码\\smart-healthcare\\knowledge";
    /** Maximum number of tokens per text segment produced by the splitter. */
    private static final int MAX_SEGMENT_SIZE_TOKENS = 300;
    /** Token overlap between adjacent segments, to preserve context continuity. */
    private static final int SEGMENT_OVERLAP_TOKENS = 30;

    @Resource
    private QdrantClient qdrantClient;
    @Resource
    private QwenEmbeddingModel qwenEmbeddingModel;
    @Resource(name = "qdrantEmbeddingStore")
    private EmbeddingStore<TextSegment> embeddingStore;

    /**
     * Recreates the Qdrant collection and ingests every {@code .txt} and {@code .pdf}
     * document found in {@link #KNOWLEDGE_DIR} when the application starts.
     *
     * <p>The existing collection (if any) is dropped and rebuilt so documents are not
     * ingested twice across restarts. This is a simple demo strategy — it is
     * destructive and should NOT be used as-is in production.
     *
     * @throws ExecutionException   if a Qdrant async operation fails
     * @throws InterruptedException if the thread is interrupted while waiting on Qdrant
     */
    @PostConstruct
    public void initQdrantEmbeddingStore() throws ExecutionException, InterruptedException {
        // Cosine distance, sized to the embedding model's native vector dimension.
        var vectorParams = Collections.VectorParams.newBuilder()
                .setDistance(Collections.Distance.Cosine)
                .setSize(qwenEmbeddingModel.dimension())
                .build();

        // Drop-and-recreate to avoid duplicate ingestion on restart (demo only).
        List<String> existingCollections = qdrantClient.listCollectionsAsync().get();
        if (existingCollections.contains(COLLECTION_NAME)) {
            qdrantClient.deleteCollectionAsync(COLLECTION_NAME).get();
        }
        qdrantClient.createCollectionAsync(COLLECTION_NAME, vectorParams).get();

        // Load all .txt and .pdf documents from the knowledge directory.
        List<Document> documents = new ArrayList<>();
        PathMatcher txtMatcher = FileSystems.getDefault().getPathMatcher("glob:*.txt");
        documents.addAll(FileSystemDocumentLoader.loadDocuments(
                KNOWLEDGE_DIR, txtMatcher, new TextDocumentParser()));
        PathMatcher pdfMatcher = FileSystems.getDefault().getPathMatcher("glob:*.pdf");
        documents.addAll(FileSystemDocumentLoader.loadDocuments(
                KNOWLEDGE_DIR, pdfMatcher, new ApachePdfBoxDocumentParser()));

        // Paragraph splitter: segments of up to MAX_SEGMENT_SIZE_TOKENS tokens with a
        // SEGMENT_OVERLAP_TOKENS-token overlap. When the paragraphs already fit under
        // the limit there is nothing to overlap.
        DocumentByParagraphSplitter documentSplitter = new DocumentByParagraphSplitter(
                MAX_SEGMENT_SIZE_TOKENS,
                SEGMENT_OVERLAP_TOKENS,
                // Token-based length counting (rather than character-based).
                new HuggingFaceTokenizer());

        // Split, embed with the Qwen model, and store the segments in Qdrant.
        EmbeddingStoreIngestor
                .builder()
                .embeddingStore(embeddingStore)
                .embeddingModel(qwenEmbeddingModel)
                .documentSplitter(documentSplitter)
                .build()
                .ingest(documents);
    }
}
