package org.fujay.commons.langchain4j.rag.processor.document;

import cn.hutool.core.util.IdUtil;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentParser;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.DocumentTransformer;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.loader.UrlDocumentLoader;
import dev.langchain4j.data.document.parser.apache.tika.ApacheTikaDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.data.segment.TextSegmentTransformer;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.IngestionResult;
import lombok.extern.slf4j.Slf4j;
import org.fujay.commons.langchain4j.rag.processor.DocumentProcessor;
import org.fujay.commons.langchain4j.rag.processor.DocumentProcessorConfig;
import org.fujay.commons.langchain4j.rag.processor.result.DocumentProcessResult;
import org.opensearch.client.opensearch.OpenSearchClient;
import org.opensearch.client.opensearch._types.Result;
import org.opensearch.client.opensearch.core.DeleteResponse;
import org.opensearch.client.opensearch.core.IndexResponse;
import org.opensearch.client.opensearch.indices.CreateIndexResponse;
import org.opensearch.client.opensearch.indices.DeleteIndexResponse;
import org.opensearch.client.transport.endpoints.BooleanResponse;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static dev.langchain4j.internal.ValidationUtils.ensureNotNull;

/**
 * Enhanced document processor: loads documents (local files or URLs), embeds
 * and stores them in an {@code EmbeddingStore}, and mirrors the full text into
 * an OpenSearch index for keyword (BM25) retrieval.
 *
 * @author slm
 */
@Slf4j
public class EnhancedDocumentProcessor implements DocumentProcessor {

    /**
     * 存储位置常量
     */
    public static final int STORAGE_LOCATION_LOCAL = 1;
    public static final int STORAGE_LOCATION_URL = 2;
    public static final int STORAGE_OSS_FILE = 3;

    private final DocumentProcessorConfig config;
    private final DocumentParser tikaDocumentParser;
    private final OpenSearchClient openSearchClient;
    private final String indexName;

    /**
     * 根据配置创建文档处理器
     */
    public EnhancedDocumentProcessor(DocumentProcessorConfig config, OpenSearchClient openSearchClient,
                                     String indexName) {
        this.config = ensureNotNull(config, "config");
        this.openSearchClient = ensureNotNull(openSearchClient, "openSearchClient");
        this.indexName = ensureNotNull(indexName, "indexName");
        this.tikaDocumentParser = new ApacheTikaDocumentParser();
        validateConfig();
        initIndex();
    }

    /**
     * 创建默认配置的文档处理器
     */
    public static EnhancedDocumentProcessor create(
            EmbeddingModel embeddingModel,
            EmbeddingStore<TextSegment> embeddingStore,
            String nameSpace,
            String indexName,
            OpenSearchClient openSearchClient) {

        DocumentProcessorConfig config = DocumentProcessorConfig.builder()
                .embeddingModel(embeddingModel)
                .embeddingStore(embeddingStore)
                .nameSpace(nameSpace)
                .documentSplitter(DocumentSplitters.recursive(500, 0))
                .build();

        return new EnhancedDocumentProcessor(config, openSearchClient, indexName);
    }

    /**
     * 初始化OpenSearch索引
     */
    private void initIndex() {
        try {
            // 1. 检查索引是否存在
            boolean exists = openSearchClient.indices()
                    .exists(builder -> builder.index(indexName))
                    .value();

            if (!exists) {
                // 2. 创建索引
                CreateIndexResponse createResponse = openSearchClient.indices()
                        .create(builder -> builder
                                .index(indexName)
                                .settings(s -> s
                                        .numberOfShards("1")
                                        .numberOfReplicas("0"))
                                .mappings(m -> m
                                        .properties("content", p -> p
                                                .text(t -> t
                                                        .analyzer("ik_smart")
                                                        .searchAnalyzer("ik_smart")
                                                        .similarity("BM25")))
                                        .properties("metadata", p -> p
                                                .object(o -> o
                                                        .properties("documentId", id -> id.keyword(k -> k))
                                                        .properties("loadTime", lt -> lt.keyword(k -> k))))));

                if (Boolean.FALSE.equals(createResponse.acknowledged())) {
                    throw new RuntimeException("创建索引失败");
                }
            }
        } catch (IOException e) {
            log.error("初始化索引失败", e);
            throw new RuntimeException("初始化索引失败", e);
        }
    }

    /**
     * 验证配置
     */
    private void validateConfig() {
        if (config.getEmbeddingModel() == null) {
            throw new IllegalArgumentException("嵌入模型不能为空");
        }
        if (config.getEmbeddingStore() == null) {
            throw new IllegalArgumentException("嵌入存储不能为空");
        }
        if (config.getDocumentSplitter() == null) {
            log.info("未指定文档分块器，将使用默认的递归分块器（500字符，0重叠）");
        }
    }

    /**
     * 处理单个文档
     */
    @Override
    public DocumentProcessResult process(Document document) {
        return process(Collections.singletonList(document));
    }

    /**
     * 处理文档列表
     */
    @Override
    public DocumentProcessResult process(List<Document> documents) {
        return process(documents, config.getEmbeddingStore());
    }

    /**
     * 处理单个文档，使用指定的嵌入存储
     */
    @Override
    public DocumentProcessResult process(Document document, EmbeddingStore<TextSegment> embeddingStore) {
        return process(Collections.singletonList(document), embeddingStore);
    }

    /**
     * 处理文档列表，使用指定的嵌入存储
     */
    @Override
    public DocumentProcessResult process(List<Document> documents, EmbeddingStore<TextSegment> embeddingStore) {
        if (documents == null || documents.isEmpty()) {
            return DocumentProcessResult.empty();
        }

        log.info("开始处理 {} 个文档", documents.size());
        LocalDateTime startTime = LocalDateTime.now();

        try {
            // 1. 应用文档转换器（如果配置了）
            List<Document> transformedDocuments = documents;
            if (config.getDocumentTransformer() != null) {
                transformedDocuments = config.getDocumentTransformer().transformAll(documents);
                log.debug("文档转换完成，转换后数量: {}", transformedDocuments.size());
            }

            // 2. 创建Ingestor并执行向量化和存储
            EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
                    .documentTransformer(config.getDocumentTransformer())
                    .documentSplitter(config.getDocumentSplitter() != null ? config.getDocumentSplitter()
                            : DocumentSplitters.recursive(500, 0))
                    .textSegmentTransformer(config.getTextSegmentTransformer())
                    .embeddingModel(config.getEmbeddingModel())
                    .embeddingStore(embeddingStore)
                    .build();

            IngestionResult ingestionResult = ingestor.ingest(transformedDocuments);

            List<String> documentIds = transformedDocuments.stream().map(Document::metadata).map(m -> m.getString("documentId")).toList();

            // 3. 索引到OpenSearch
            for (Document document : transformedDocuments) {
                indexToOpenSearch(document);
            }

            LocalDateTime endTime = LocalDateTime.now();

            // 4. 计算统计信息

            int segmentCount = documents.size();
            long totalCharacters = documents.stream()
                    .mapToLong(doc -> doc.text().length())
                    .sum();
            // 5. 构建结果对象
            return DocumentProcessResult.builder()
                    .startTime(startTime)
                    .endTime(endTime)
                    .documentCount(documents.size())
                    .segmentCount(segmentCount)
                    .tokenUsage(ingestionResult.tokenUsage())
                    .successCount(documents.size())
                    .failureCount(0)
                    .totalCharacters(totalCharacters)
                    .documentIds(documentIds)
                    .build();

        } catch (Exception e) {
            log.error("文档处理失败", e);
            LocalDateTime endTime = LocalDateTime.now();

            return DocumentProcessResult.builder()
                    .startTime(startTime)
                    .endTime(endTime)
                    .documentCount(documents.size())
                    .successCount(0)
                    .failureCount(documents.size())
                    .errors(Collections.singletonList(e))
                    .build();
        }
    }

    /**
     * 索引文档到OpenSearch
     */
    private void indexToOpenSearch(Document document) {
        try {
            // 1. 构建索引请求
            Map<String, Object> source = new HashMap<>();
            source.put("content", document.text());
            source.put("metadata", document.metadata().toMap());

            String documentId = document.metadata().getString("documentId");

            IndexResponse response = openSearchClient.index(builder -> builder
                    .index(indexName)
                    .id(documentId)
                    .document(source));

            // 3. 检查响应结果
            if (response.result() == Result.Created || response.result() == Result.Updated) {
                log.info("文档 {} 成功索引到索引库 {}，ID: {}", documentId, indexName, response.id());
            } else {
                log.warn("索引文档 {} 到索引 {} 失败或未成功: 结果 - {}, ShardInfo - {}",
                        documentId, indexName, response.result(), response.shards());
            }
        } catch (IOException e) {
            log.error("索引文档 {} 到索引 {} 时发生IO异常", document.metadata().getString("documentId"), indexName, e);
        } catch (Exception e) {
            log.error("索引文档 {} 到索引 {} 时发生未知异常", document.metadata().getString("documentId"), indexName, e);
        }
    }

    /**
     * 从本地路径加载文档
     */
    public Document loadLocalDocument(String path) {
        try {
            if (path == null || path.isEmpty()) {
                log.warn("文件路径为空");
                return null;
            }

            Path filePath = Paths.get(path);
            if (!Files.exists(filePath)) {
                log.warn("文件不存在: {}", path);
                return null;
            }

            log.debug("加载本地文档: {}", path);

            // 使用TikaDocumentParser加载各类型文档
            Document document = FileSystemDocumentLoader.loadDocument(path, tikaDocumentParser);

            // 添加文档ID，便于后续删除
            Map<String, Object> metadata = new HashMap<>(document.metadata().toMap());
            metadata.put("documentId", IdUtil.fastSimpleUUID());
            metadata.put("nameSpace", config.getNameSpace());
            metadata.put("loadTime", LocalDateTime.now().toString());

            return Document.from(document.text(), dev.langchain4j.data.document.Metadata.from(metadata));
        } catch (Exception e) {
            log.error("加载本地文档失败: {}", path, e);
            return null;
        }
    }

    /**
     * 从URL加载文档
     */
    public Document loadUrlDocument(String url) {
        try {
            if (url == null || url.isEmpty()) {
                log.warn("URL为空");
                return null;
            }

            log.debug("加载URL文档: {}", url);
            // 使用TikaDocumentParser加载URL文档
            Document document = UrlDocumentLoader.load(url, tikaDocumentParser);

            // 添加文档ID，便于后续删除
            Map<String, Object> metadata = new HashMap<>(document.metadata().toMap());
            metadata.put("documentId", UUID.randomUUID().toString());
            metadata.put("loadTime", LocalDateTime.now().toString());
            metadata.put("nameSpace", config.getNameSpace());

            return Document.from(document.text(), dev.langchain4j.data.document.Metadata.from(metadata));
        } catch (Exception e) {
            log.error("加载URL文档失败: {}", url, e);
            return null;
        }
    }

    /**
     * 根据存储位置加载文档
     */
    @Override
    public Document loadDocument(String path, int storageStrategy) {
        if (storageStrategy == STORAGE_LOCATION_LOCAL) {
            return loadLocalDocument(path);
        } else {
            return loadUrlDocument(path);
        }
    }

    /**
     * 处理文件路径
     */
    @Override
    public DocumentProcessResult processFile(Path filePath) {
        if (filePath == null || !Files.exists(filePath)) {
            log.warn("文件不存在: {}", filePath);
            return buildEmptyFailureResult();
        }

        if (!isSupportedFile(filePath.toString())) {
            log.warn("不支持的文件类型: {}", filePath);
            return DocumentProcessResult.builder()
                    .documentCount(1)
                    .skippedCount(1)
                    .endTime(LocalDateTime.now())
                    .build();
        }

        Document document = loadLocalDocument(filePath.toString());
        if (document == null) {
            return buildEmptyFailureResult();
        }

        return process(document);
    }

    /**
     * 处理多个文件路径
     */
    @Override
    public DocumentProcessResult processFiles(List<Path> filePaths) {
        if (filePaths == null || filePaths.isEmpty()) {
            return DocumentProcessResult.empty();
        }

        log.info("开始处理 {} 个文件", filePaths.size());

        // 是否并行处理
        if (config.getParallelism() > 1) {
            return processFilesInParallel(filePaths);
        }

        // 串行处理
        List<Document> documents = new ArrayList<>();
        List<Path> skippedFiles = new ArrayList<>();
        List<Path> failedFiles = new ArrayList<>();
        List<Exception> errors = new ArrayList<>();

        for (Path filePath : filePaths) {
            try {
                if (!isSupportedFile(filePath.toString())) {
                    log.warn("跳过不支持的文件类型: {}", filePath);
                    skippedFiles.add(filePath);
                    continue;
                }

                Document document = loadLocalDocument(filePath.toString());
                if (document != null) {
                    documents.add(document);
                } else {
                    log.warn("文件加载失败: {}", filePath);
                    failedFiles.add(filePath);
                }
            } catch (Exception e) {
                log.error("文件处理失败: {}", filePath, e);
                failedFiles.add(filePath);
                errors.add(e);
            }
        }

        // 处理加载的文档
        DocumentProcessResult result = process(documents);

        // 合并结果
        return DocumentProcessResult.builder()
                .startTime(result.getStartTime())
                .endTime(result.getEndTime())
                .documentCount(filePaths.size())
                .segmentCount(result.getSegmentCount())
                .tokenUsage(result.getTokenUsage())
                .successCount(result.getSuccessCount())
                .failureCount(result.getFailureCount() + failedFiles.size())
                .skippedCount(skippedFiles.size())
                .errors(result.getErrors() != null
                        ? Stream.concat(result.getErrors().stream(), errors.stream()).collect(Collectors.toList())
                        : errors)
                .totalCharacters(result.getTotalCharacters())
                .build();
    }

    /**
     * 并行处理文件
     */
    private DocumentProcessResult processFilesInParallel(List<Path> filePaths) {
        log.info("并行处理 {} 个文件，并行度: {}", filePaths.size(), config.getParallelism());
        ExecutorService executor = config.getExecutorService();

        try {
            List<Future<DocumentProcessResult>> futures = new ArrayList<>();
            for (Path filePath : filePaths) {
                futures.add(executor.submit(() -> processFile(filePath)));
            }

            List<DocumentProcessResult> results = new ArrayList<>();
            for (Future<DocumentProcessResult> future : futures) {
                try {
                    results.add(future.get(config.getTimeoutMs(), TimeUnit.MILLISECONDS));
                } catch (Exception e) {
                    log.error("并行任务失败", e);
                    results.add(buildEmptyFailureResult(e));
                }
            }

            return DocumentProcessResult.merge(results);
        } finally {
            if (config.isAutoShutdownExecutor() && executor != config.getExecutorService()) {
                executor.shutdown();
            }
        }
    }

    /**
     * 处理目录
     */
    @Override
    public DocumentProcessResult processDirectory(Path directoryPath, boolean recursive) {
        if (directoryPath == null || !Files.isDirectory(directoryPath)) {
            log.warn("目录不存在或不是目录: {}", directoryPath);
            return buildEmptyFailureResult();
        }

        try {
            List<Path> filePaths = collectFiles(directoryPath, recursive);
            log.info("从目录 {} 收集到 {} 个文件", directoryPath, filePaths.size());
            return processFiles(filePaths);
        } catch (Exception e) {
            log.error("处理目录失败: {}", directoryPath, e);
            return buildEmptyFailureResult(e);
        }
    }

    /**
     * 收集目录中的文件
     */
    private List<Path> collectFiles(Path directoryPath, boolean recursive) {
        List<Path> files = new ArrayList<>();
        try {
            Files.list(directoryPath).forEach(path -> {
                try {
                    if (Files.isRegularFile(path) && isSupportedFile(path.toString())) {
                        files.add(path);
                    } else if (recursive && Files.isDirectory(path)) {
                        files.addAll(collectFiles(path, true));
                    }
                } catch (Exception e) {
                    log.error("访问路径失败: {}", path, e);
                }
            });
        } catch (Exception e) {
            log.error("列出目录内容失败: {}", directoryPath, e);
        }
        return files;
    }

    /**
     * 异步处理文档
     */
    @Override
    public CompletableFuture<DocumentProcessResult> processAsync(List<Document> documents) {
        return CompletableFuture.supplyAsync(() -> process(documents), config.getExecutorService());
    }

    /**
     * 删除文档（根据ID）
     */
    @Override
    public void removeDocument(String documentId, Boolean index) {
        log.info("请求删除文档: {}", documentId);
        config.getEmbeddingStore().remove(documentId);
        if (index) {
            deleteIndex(indexName);
        } else {
            deleteDocumentInIndex(indexName, documentId);
        }
    }

    @Override
    public void removeDocuments(List<String> documents, Boolean index) {
        config.getEmbeddingStore().removeAll(documents);
        if (index) {
            deleteIndex(indexName);
        } else {
            documents.forEach(document -> deleteDocumentInIndex(indexName, document));
        }
    }

    /**
     * 删除OpenSearch中的整个索引。
     * 这通常对应于删除整个知识库。
     *
     * @param indexName 要删除的索引的名称（例如，知识库ID）。
     */
    private void deleteIndex(String indexName) {
        if (indexName == null || indexName.trim().isEmpty()) {
            log.error("删除索引失败：索引名称不能为空。");
            return;
        }
        try {
            BooleanResponse existsResponse = openSearchClient.indices().exists(r -> r.index(indexName));
            if (!existsResponse.value()) {
                log.info("索引 {} 不存在，无需删除。", indexName);
                return;
            }
            log.info("准备删除索引: {}", indexName);
            DeleteIndexResponse deleteIndexResponse = openSearchClient.indices().delete(d -> d.index(indexName));

            if (deleteIndexResponse.acknowledged()) {
                log.info("索引 {} 已成功删除。", indexName);
            } else {
                log.warn("删除索引 {} 的请求未被确认。", indexName);
            }
        } catch (IOException e) {
            log.error("删除索引 {} 时发生IO异常。", indexName, e);
        } catch (Exception e) {
            log.error("删除索引 {} 时发生未知异常。", indexName, e);
        }
    }

    /**
     * 从指定的OpenSearch索引中删除单个文档。
     * 这通常对应于从知识库中删除某个特定的文件/文档。
     *
     * @param indexName  文档所在的索引名称（例如，知识库ID）。
     * @param documentId 要删除的文档的ID (在OpenSearch中的_id，通常是文件的唯一标识符)。
     */
    private void deleteDocumentInIndex(String indexName, String documentId) {
        if (indexName == null || indexName.trim().isEmpty()) {
            log.error("删除文档失败：索引名称不能为空。");
            return;
        }
        if (documentId == null || documentId.trim().isEmpty()) {
            log.error("删除文档失败：文档ID不能为空。索引名: {}", indexName);
            return;
        }

        try {
            log.info("准备从索引 {} 中删除文档: {}", indexName, documentId);
            DeleteResponse deleteResponse = openSearchClient.delete(d -> d
                    .index(indexName)
                    .id(documentId));

            if (deleteResponse.result() == Result.Deleted) {
                log.info("文档 {} 已成功从索引 {} 中删除。", documentId, indexName);
            } else if (deleteResponse.result() == Result.NotFound) {
                log.info("文档 {} 在索引 {} 中未找到，无需删除。", documentId, indexName);
            } else {
                log.warn("从索引 {} 中删除文档 {} 失败或结果未知。结果: {}, ShardInfo: {}",
                        indexName, documentId, deleteResponse.result(), deleteResponse.shards());
            }
        } catch (IOException e) {
            log.error("从索引 {} 中删除文档 {} 时发生IO异常。", indexName, documentId, e);
        } catch (Exception e) {
            log.error("从索引 {} 中删除文档 {} 时发生未知异常。", indexName, documentId, e);
        }
    }

    /**
     * 获取配置
     */
    @Override
    public DocumentProcessorConfig getConfig() {
        return config;
    }

    /**
     * 检查文件是否支持处理
     */
    private boolean isSupportedFile(String filePath) {
        // 检查是否在支持的扩展名列表中
        String ext = getFileExtension(filePath).toLowerCase();
        return config.getSupportedExtensions().isEmpty() ||
                config.getSupportedExtensions().contains(ext);
    }

    /**
     * 获取文件扩展名
     */
    private String getFileExtension(String filename) {
        int lastDot = filename.lastIndexOf('.');
        if (lastDot == -1 || lastDot == filename.length() - 1) {
            return "";
        }
        return filename.substring(lastDot + 1);
    }

    /**
     * 构建空的失败结果
     */
    private DocumentProcessResult buildEmptyFailureResult() {
        return DocumentProcessResult.builder()
                .documentCount(1)
                .failureCount(1)
                .endTime(LocalDateTime.now())
                .build();
    }

    /**
     * 构建包含错误的失败结果
     */
    private DocumentProcessResult buildEmptyFailureResult(Exception error) {
        return DocumentProcessResult.builder()
                .documentCount(1)
                .failureCount(1)
                .errors(Collections.singletonList(error))
                .endTime(LocalDateTime.now())
                .build();
    }

    /**
     * 构建器
     */
    public static class Builder {
        private DocumentTransformer documentTransformer;
        private DocumentSplitter documentSplitter;
        private TextSegmentTransformer textSegmentTransformer;
        private EmbeddingModel embeddingModel;
        private EmbeddingStore<TextSegment> embeddingStore;
        private int parallelism = Runtime.getRuntime().availableProcessors();
        private ExecutorService executorService = null;
        private boolean autoShutdownExecutor = true;
        private int maxRetries = 3;
        private long timeoutMs = 60000;
        private Set<String> supportedExtensions = new HashSet<>();
        private OpenSearchClient openSearchClient;
        private String indexName;

        public Builder documentTransformer(DocumentTransformer documentTransformer) {
            this.documentTransformer = documentTransformer;
            return this;
        }

        public Builder documentSplitter(DocumentSplitter documentSplitter) {
            this.documentSplitter = documentSplitter;
            return this;
        }

        public Builder textSegmentTransformer(TextSegmentTransformer textSegmentTransformer) {
            this.textSegmentTransformer = textSegmentTransformer;
            return this;
        }

        public Builder embeddingModel(EmbeddingModel embeddingModel) {
            this.embeddingModel = embeddingModel;
            return this;
        }

        public Builder embeddingStore(EmbeddingStore<TextSegment> embeddingStore) {
            this.embeddingStore = embeddingStore;
            return this;
        }

        public Builder parallelism(int parallelism) {
            this.parallelism = parallelism;
            return this;
        }

        public Builder executorService(ExecutorService executorService) {
            this.executorService = executorService;
            return this;
        }

        public Builder autoShutdownExecutor(boolean autoShutdownExecutor) {
            this.autoShutdownExecutor = autoShutdownExecutor;
            return this;
        }

        public Builder maxRetries(int maxRetries) {
            this.maxRetries = maxRetries;
            return this;
        }

        public Builder timeoutMs(long timeoutMs) {
            this.timeoutMs = timeoutMs;
            return this;
        }

        public Builder supportedExtensions(Set<String> supportedExtensions) {
            this.supportedExtensions = supportedExtensions;
            return this;
        }

        public Builder openSearchClient(OpenSearchClient openSearchClient) {
            this.openSearchClient = openSearchClient;
            return this;
        }

        public Builder indexName(String indexName) {
            this.indexName = indexName;
            return this;
        }

        public EnhancedDocumentProcessor build() {
            DocumentProcessorConfig config = DocumentProcessorConfig.builder()
                    .documentTransformer(documentTransformer)
                    .documentSplitter(documentSplitter)
                    .textSegmentTransformer(textSegmentTransformer)
                    .embeddingModel(embeddingModel)
                    .embeddingStore(embeddingStore)
                    .parallelism(parallelism)
                    .executorService(executorService)
                    .autoShutdownExecutor(autoShutdownExecutor)
                    .maxRetries(maxRetries)
                    .timeoutMs(timeoutMs)
                    .build();

            if (supportedExtensions != null && !supportedExtensions.isEmpty()) {
                config.setSupportedExtensions(supportedExtensions);
            }

            return new EnhancedDocumentProcessor(config, openSearchClient, indexName);
        }
    }
}