package com.study.springairag.etl.transformers;

import org.springframework.ai.document.ContentFormatter;
import org.springframework.ai.document.DefaultContentFormatter;
import org.springframework.ai.document.Document;
import org.springframework.ai.transformer.ContentFormatTransformer;
import org.springframework.ai.transformer.splitter.TokenTextSplitter;

import java.util.List;
import java.util.Map;

/**
 * Demo entry point: splits sample documents into token-based chunks, then
 * runs them through a content-format transformer and prints the results.
 *
 * @author dejavu
 * @since 2025/7/24
 */
public class TokenTextSplitterTest {

    public static void main(String[] args) {
        List<Document> chunks = testTokenTextSplitter();

        // Content format transformer: ensures every document's content follows a uniform format.
        ContentFormatTransformer transformer =
                new ContentFormatTransformer(DefaultContentFormatter.builder().build());
        List<Document> formatted = transformer.apply(chunks);

        formatted.forEach(document -> System.out.println(document.getText()));
    }

    /**
     * Splits two hard-coded sample documents into token-based chunks using a
     * default {@link TokenTextSplitter}, printing each chunk and its metadata
     * along the way.
     *
     * @return the list of chunk documents produced by the splitter
     */
    public static List<Document> testTokenTextSplitter() {
        Document firstDoc = new Document(
                "This is a long piece of text that needs to be split into smaller chunks for processing.",
                Map.of("source", "example.txt"));
        Document secondDoc = new Document(
                "Another document with content that will be split based on token count.",
                Map.of("source", "example2.txt"));

        TokenTextSplitter splitter = new TokenTextSplitter();
        List<Document> chunks = splitter.apply(List.of(firstDoc, secondDoc));

        for (Document chunk : chunks) {
            System.out.println("Chunk: " + chunk.getText());
            System.out.println("Metadata: " + chunk.getMetadata());
        }
        return chunks;
    }
}
