package dev.langchain4j.example.agent;

import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.TokenCountEstimator;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

@Service
public class EmbeddingService {

    private final EmbeddingModel embeddingModel;
    // Estimates token counts so the splitter can enforce token-based chunk sizes.
    private final TokenCountEstimator tokenizer;
    private final EmbeddingStore<TextSegment> embeddingStore;

    public EmbeddingService(EmbeddingModel embeddingModel, TokenCountEstimator tokenizer, EmbeddingStore<TextSegment> embeddingStore) {
        this.embeddingModel = embeddingModel;
        this.tokenizer = tokenizer;
        this.embeddingStore = embeddingStore;
    }

    /**
     * Parses an uploaded file as plain text, splits it into segments and stores
     * their embeddings in the configured embedding store.
     *
     * @param file the uploaded document (parsed with {@link TextDocumentParser})
     * @throws IOException if the upload stream cannot be read
     */
    public void processDocument(MultipartFile file) throws IOException {
        // Stream the upload straight into the parser. The previous implementation
        // copied the upload to a temp file first — unnecessary I/O, and it used the
        // client-controlled getOriginalFilename() as the temp-file suffix, which can
        // fail (or misbehave) for filenames containing path separators.
        try (InputStream inputStream = file.getInputStream()) {
            ingest(loadDocument(inputStream, new TextDocumentParser()));
        }
    }

    /**
     * Splits the given text content into segments and stores their embeddings.
     *
     * @param content raw text to embed (encoded as UTF-8 before parsing)
     * @throws IOException if reading the in-memory stream fails (not expected in practice)
     */
    public void processDocument(String content) throws IOException {
        try (InputStream inputStream = new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8))) {
            ingest(loadDocument(inputStream, new TextDocumentParser()));
        }
    }

    // Shared ingestion pipeline for both overloads: split into ~500-token segments
    // with 100-token overlap, embed each segment, and store it. Keeping this in one
    // place ensures the chunking parameters cannot drift between entry points.
    private void ingest(Document document) {
        DocumentSplitter documentSplitter = DocumentSplitters.recursive(500, 100, tokenizer);
        EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
                .documentSplitter(documentSplitter)
                .embeddingModel(embeddingModel)
                .embeddingStore(embeddingStore)
                .build();
        ingestor.ingest(document);
    }

    // Thin wrapper kept for symmetry with the original code; the parser reads the
    // stream fully and returns a Document.
    private Document loadDocument(InputStream inputStream, TextDocumentParser parser) throws IOException {
        return parser.parse(inputStream);
    }

    /** Exposes the backing store, e.g. for building a retriever elsewhere. */
    public EmbeddingStore<TextSegment> getEmbeddingStore() {
        return embeddingStore;
    }
}