package me.flyray.bsin.server.biz;

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch._types.mapping.*;
import co.elastic.clients.elasticsearch.indices.CreateIndexResponse;
import co.elastic.clients.elasticsearch.indices.IndexSettings;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.ai.document.Document;
import org.springframework.ai.reader.tika.TikaDocumentReader;
import org.springframework.ai.transformer.splitter.TokenTextSplitter;
import org.springframework.ai.vectorstore.SearchRequest;
import org.springframework.ai.vectorstore.VectorStore;
import org.springframework.ai.vectorstore.elasticsearch.autoconfigure.ElasticsearchVectorStoreProperties;
import org.springframework.core.io.ByteArrayResource;
import org.springframework.stereotype.Component;
import org.springframework.web.client.RestTemplate;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@Slf4j
@Component
public class DocumentVectorBiz {

    /** Elasticsearch mapping field that stores the raw chunk text. */
    private static final String TEXT_FIELD = "content";
    /** Elasticsearch mapping field that stores the dense embedding vector. */
    private static final String VECTOR_FIELD = "embedding";

    private final RestTemplate restTemplate;
    private final TokenTextSplitter textSplitter;

    private final ElasticsearchClient elasticsearchClient;
    private final ElasticsearchVectorStoreProperties options;

    public DocumentVectorBiz(RestTemplate restTemplate, ElasticsearchClient elasticsearchClient, ElasticsearchVectorStoreProperties options) {
        this.restTemplate = restTemplate;
        this.elasticsearchClient = elasticsearchClient;
        this.options = options;
        // TokenTextSplitter(chunkSize, minChunkSizeChars, minChunkLengthToEmbed, maxNumChunks, keepSeparator)
        this.textSplitter = new TokenTextSplitter(1000, 200, 5, 10000, true);
    }

    /**
     * Downloads the document at the given URL, parses it with Tika and splits
     * the parsed text into embedding-sized chunks.
     *
     * @param documentUrl URL of the document to fetch and parse
     * @param metadata    extra metadata merged into every chunk; may be {@code null}
     * @return the split document chunks, ready to be embedded
     * @throws RuntimeException if downloading or parsing fails (cause preserved)
     */
    public List<Document> processDocumentFromUrl(String documentUrl, Map<String, Object> metadata) {
        try {
            // 1. Download the document once; the bytes are reused for parsing below.
            byte[] documentBytes = downloadDocument(documentUrl);

            // 2. Parse the downloaded bytes with Tika and attach metadata.
            List<Document> documents = parseDocument(documentBytes, documentUrl, metadata);

            // 3. Split the documents into smaller chunks.
            return splitDocuments(documents);

        } catch (Exception e) {
            throw new RuntimeException("处理文档失败: " + e.getMessage(), e);
        }
    }

    /**
     * Downloads the document bytes from the given URL.
     *
     * @throws IOException if the request fails or the response has no body
     */
    private byte[] downloadDocument(String documentUrl) throws IOException {
        byte[] body;
        try {
            body = restTemplate.getForObject(documentUrl, byte[].class);
        } catch (Exception e) {
            throw new IOException("下载文档失败: " + documentUrl, e);
        }
        // getForObject returns null when the response carries no body; fail fast
        // instead of letting Tika NPE on a null byte array later.
        if (body == null) {
            throw new IOException("下载文档失败: " + documentUrl);
        }
        return body;
    }

    /**
     * Parses the already-downloaded document bytes with Tika and enriches each
     * resulting document with provenance metadata.
     *
     * @param documentBytes raw bytes previously fetched from {@code documentUrl}
     * @param documentUrl   source URL, recorded as {@code source_url} metadata
     * @param metadata      caller-supplied metadata merged into each document; may be {@code null}
     */
    private List<Document> parseDocument(byte[] documentBytes, String documentUrl, Map<String, Object> metadata) {

        // Parse the bytes we already downloaded. (Constructing the reader from the
        // URL instead would make Tika re-download the document and silently
        // discard documentBytes.)
        TikaDocumentReader tikaReader = new TikaDocumentReader(new ByteArrayResource(documentBytes));

        // Extract the document content.
        List<Document> documents = tikaReader.get();

        // Tag every document with its provenance plus any caller-supplied metadata.
        documents.forEach(doc -> {
            doc.getMetadata().put("source_url", documentUrl);
            doc.getMetadata().put("processed_time", System.currentTimeMillis());
            if (metadata != null) {
                doc.getMetadata().putAll(metadata);
            }
        });

        return documents;

    }

    /** Splits documents into smaller, token-bounded chunks. */
    private List<Document> splitDocuments(List<Document> documents) {
        return textSplitter.apply(documents);
    }

    /**
     * Creates the configured Elasticsearch vector index if it does not already
     * exist: a dense-vector field for embeddings, a text field for content and
     * a metadata object with a keyword {@code ref_doc_id}.
     *
     * @throws IllegalArgumentException if no index name is configured
     * @throws RuntimeException         if index creation fails or is not acknowledged
     */
    public void createIndexIfNotExists() {
        try {
            String indexName = options.getIndexName();
            Integer dimsLength = options.getDimensions();

            if (StringUtils.isBlank(indexName)) {
                throw new IllegalArgumentException("Elastic search index name must be provided");
            }

            boolean exists = elasticsearchClient.indices().exists(idx -> idx.index(indexName)).value();
            if (exists) {
                log.debug("Index {} already exists. Skipping creation.", indexName);
                return;
            }

            String similarityAlgo = options.getSimilarity().name();
            IndexSettings indexSettings = IndexSettings
                    .of(settings -> settings.numberOfShards("1").numberOfReplicas("1"));

            Map<String, Property> properties = new HashMap<>();
            // Dense vector field used for kNN similarity search.
            properties.put(VECTOR_FIELD, Property.of(property -> property.denseVector(
                    DenseVectorProperty.of(dense -> dense.index(true).dims(dimsLength).similarity(similarityAlgo)))));
            // Full-text field holding the chunk content.
            properties.put(TEXT_FIELD, Property.of(property -> property.text(TextProperty.of(t -> t))));

            // Metadata object; ref_doc_id is a keyword so it can be filtered exactly.
            Map<String, Property> metadata = new HashMap<>();
            metadata.put("ref_doc_id", Property.of(property -> property.keyword(KeywordProperty.of(k -> k))));
            properties.put("metadata",
                    Property.of(property -> property.object(ObjectProperty.of(op -> op.properties(metadata)))));

            CreateIndexResponse indexResponse = elasticsearchClient.indices()
                    .create(createIndexBuilder -> createIndexBuilder.index(indexName)
                            .settings(indexSettings)
                            .mappings(TypeMapping.of(mappings -> mappings.properties(properties))));

            if (!indexResponse.acknowledged()) {
                throw new RuntimeException("failed to create index");
            }

            log.info("create elasticsearch index {} successfully", indexName);
        }
        catch (IOException e) {
            log.error("failed to create index", e);
            throw new RuntimeException(e);
        }
    }

}
