package com.xwj.aiforrag.utils;

import org.springframework.ai.document.Document;
import org.springframework.ai.tokenizer.JTokkitTokenCountEstimator;
import org.springframework.ai.transformer.splitter.TokenTextSplitter;

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Objects;
import java.util.Queue;

/**
 * Wraps a {@link TokenTextSplitter} and applies it repeatedly until every
 * resulting {@link Document} fits within a token budget, so no chunk exceeds
 * the embedding model's input limit.
 */
public class TokenTextSplitterWithContent {
    /**
     * Default per-chunk token budget; matches the OpenAI embedding input limit.
     */
    private static final int MAX_INPUT_TOKEN_COUNT = 8191;

    private final TokenTextSplitter tokenTextSplitter;
    private final JTokkitTokenCountEstimator tokenEstimator;
    private final int maxInputTokenCount;

    /**
     * Creates a splitter using the default token budget of {@value #MAX_INPUT_TOKEN_COUNT}.
     *
     * @param tokenTextSplitter the underlying splitter used to break oversized documents
     */
    public TokenTextSplitterWithContent(TokenTextSplitter tokenTextSplitter) {
        this(tokenTextSplitter, MAX_INPUT_TOKEN_COUNT);
    }

    /**
     * Creates a splitter with a custom token budget.
     *
     * @param tokenTextSplitter  the underlying splitter used to break oversized documents
     * @param maxInputTokenCount documents whose estimated token count reaches this
     *                           threshold are split further; must be positive
     * @throws NullPointerException     if {@code tokenTextSplitter} is null
     * @throws IllegalArgumentException if {@code maxInputTokenCount} is not positive
     */
    public TokenTextSplitterWithContent(TokenTextSplitter tokenTextSplitter, int maxInputTokenCount) {
        this.tokenTextSplitter = Objects.requireNonNull(tokenTextSplitter, "tokenTextSplitter");
        if (maxInputTokenCount <= 0) {
            throw new IllegalArgumentException("maxInputTokenCount must be positive: " + maxInputTokenCount);
        }
        this.maxInputTokenCount = maxInputTokenCount;
        this.tokenEstimator = new JTokkitTokenCountEstimator();
    }

    /**
     * Splits every oversized document until all results fit within the token budget.
     * Documents already within the budget (or with no text) pass through unchanged.
     *
     * @param documents the documents to process
     * @return documents whose estimated token counts are below the budget
     */
    public List<Document> tokenSplitterApply(List<Document> documents) {
        List<Document> result = new ArrayList<>();
        Queue<Document> queue = new ArrayDeque<>(documents);

        while (!queue.isEmpty()) {
            Document doc = queue.poll();
            String text = doc.getText();
            // A null text cannot be estimated (or split); pass the document through.
            if (text != null && tokenEstimator.estimate(text) >= maxInputTokenCount) {
                List<Document> splitDocs = tokenTextSplitter.apply(List.of(doc));
                // Guard against an infinite loop: if the splitter could not break
                // the document down any further (it returned the same single text),
                // accept the document as-is instead of re-queueing it forever.
                if (splitDocs.size() == 1 && Objects.equals(splitDocs.get(0).getText(), text)) {
                    result.add(doc);
                } else {
                    queue.addAll(splitDocs);
                }
            } else {
                result.add(doc);
            }
        }
        return result;
    }
}