package com.jtzc.aikf.file;

import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.splitter.DocumentByLineSplitter;
import dev.langchain4j.data.document.splitter.DocumentByParagraphSplitter;
import dev.langchain4j.data.document.splitter.DocumentBySentenceSplitter;
import dev.langchain4j.model.embedding.onnx.HuggingFaceTokenizer;
import org.springframework.stereotype.Component;

import java.util.HashMap;
import java.util.Map;

/**
 * @author wu chuang
 * @description
 */
@Component
public class DocumentSplitterFactory {

    /** Immutable registry of splitters by key; populated once at class-load time. */
    private static final Map<String, DocumentSplitter> SPLITTERS;

    static {
        // Share a single tokenizer across all splitters; it is only used for
        // token counting and is comparatively expensive to construct.
        HuggingFaceTokenizer tokenizer = new HuggingFaceTokenizer();

        // Paragraph splitter: each segment holds at most 300 tokens, with a
        // 30-token overlap between segments to preserve continuity.
        // Note: when the total paragraph length is below the maximum segment
        // size, no overlap is produced.
        DocumentByParagraphSplitter paragraphSplitter =
                new DocumentByParagraphSplitter(300, 30, tokenizer);

        // Line splitter: max 100 tokens per segment, 10-token overlap.
        DocumentByLineSplitter lineSplitter =
                new DocumentByLineSplitter(100, 10, tokenizer);

        // Sentence splitter: max 1000 tokens per segment, 20-token overlap.
        DocumentBySentenceSplitter sentenceSplitter =
                new DocumentBySentenceSplitter(1000, 20, tokenizer);

        SPLITTERS = Map.of(
                "ParagraphSplitter", paragraphSplitter,
                "LineSplitter", lineSplitter,
                "SentenceSplitter", sentenceSplitter);
    }

    /**
     * Returns the splitter registered under the given key.
     *
     * @param key one of {@code "ParagraphSplitter"}, {@code "LineSplitter"},
     *            {@code "SentenceSplitter"}
     * @return the matching splitter, or {@code null} if no splitter is
     *         registered under {@code key}
     */
    public DocumentSplitter getDocumentSplitter(String key) {
        return SPLITTERS.get(key);
    }

}
