package cn.codecrab.ai.langchain4j.controller;

import dev.langchain4j.community.dashscope.spring.Properties;
import dev.langchain4j.community.model.dashscope.QwenTokenizer;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.apache.tika.ApacheTikaDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentByParagraphSplitter;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.Tokenizer;
import dev.langchain4j.model.embedding.onnx.HuggingFaceTokenizer;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.annotation.Resource;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import java.io.File;
import java.nio.file.FileSystems;
import java.nio.file.PathMatcher;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Demo endpoints for LangChain4j document handling: raw document reading,
 * split-and-embed ingestion into an in-memory store, and token counting
 * via the DashScope Qwen tokenizer.
 *
 * @author 王刚
 * @since 2025年05月10日 17:07
 */
@Tag(name = "文档读取")
@RestController
@RequestMapping("/document")
public class DocumentController {

    // DashScope configuration (API key / model name) injected from Spring properties.
    @Resource
    private Properties properties;

    /**
     * Reads a single file, or every file in a directory (optionally filtered
     * by extension), and returns the concatenated metadata + text of each
     * loaded document.
     *
     * @param filePath path to a file or a directory
     * @param fileExt  optional file extension filter (e.g. {@code "pdf"});
     *                 only applied when {@code filePath} is a directory
     * @return metadata and text of each document, joined with a divider
     */
    @Operation(summary = "读取")
    @GetMapping("/read")
    public String read(@RequestParam String filePath, @RequestParam(required = false) String fileExt) {
        File file = new File(filePath);

        List<Document> documents;

        if (file.isDirectory()) {
            PathMatcher pathMatcher;
            if (StringUtils.isNotEmpty(fileExt)) {
                // Non-recursive load matches plain file names, so "glob:*.<ext>" is sufficient.
                pathMatcher = FileSystems.getDefault().getPathMatcher("glob:*." + fileExt);
            } else {
                // No filter requested: accept every file in the directory.
                pathMatcher = path -> true;
            }
            documents = FileSystemDocumentLoader.loadDocuments(file.getPath(), pathMatcher, new ApacheTikaDocumentParser());
        } else {
            // Use the same Tika parser as the directory branch so a single file
            // supports the same formats (PDF, DOCX, ...). Previously this branch
            // fell back to the loader's default parser, behaving inconsistently.
            Document document = FileSystemDocumentLoader.loadDocument(file.getPath(), new ApacheTikaDocumentParser());
            documents = List.of(document);
        }

        return documents.stream()
            .map(document -> document.metadata() + "\n" + document.text())
            .collect(Collectors.joining("\n\n\n\n\n\n========================\n"));
    }

    /**
     * Loads a document, splits it into paragraph-based segments, and ingests
     * the segments into an in-memory embedding store.
     *
     * <p>NOTE(review): the store is created per request and only printed to
     * stdout, so nothing is retained between calls — this endpoint is a demo
     * of the ingestion pipeline, not a persistent store.
     *
     * @param filePath path of the document to ingest
     * @return an empty string (the ingested store is printed to stdout)
     */
    @Operation(summary = "向量存储")
    @GetMapping("/embedding-store")
    public String embeddingStore(@RequestParam String filePath) {
        Document document = FileSystemDocumentLoader.loadDocument(filePath);
        EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();

        // Split by paragraph: each segment holds at most 300 tokens, with a
        // 30-token overlap between adjacent segments to preserve context.
        DocumentByParagraphSplitter documentSplitter = new DocumentByParagraphSplitter(
            300,
            30,
            // Tokenizer: sizes are counted in tokens; omitting this parameter
            // would make the splitter count characters instead.
            new HuggingFaceTokenizer()
        );

        EmbeddingStoreIngestor.builder().documentSplitter(documentSplitter).embeddingStore(embeddingStore).build().ingest(document);
        System.out.println(embeddingStore);
        return "";
    }

    /**
     * Estimates the token count of a fixed sample message using the Qwen
     * tokenizer configured via the injected DashScope properties.
     *
     * @return estimated token count of the sample user message
     */
    @Operation(summary = "获取消息的Token数")
    @GetMapping("/token-count")
    public long tokenCount() {
        Tokenizer tokenizer = new QwenTokenizer(properties.getChatModel().getApiKey(), properties.getChatModel().getModelName());
        return tokenizer.estimateTokenCountInMessage(UserMessage.userMessage("这是一个测试文本，用于测试Token长度"));
    }

}
