package com.hnit.service;


import com.fasterxml.jackson.databind.ObjectMapper;

import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.splitter.DocumentByLineSplitter;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.store.embedding.EmbeddingStore;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;

@Service
public class VectorStoreService {

    private static final Logger LOG = Logger.getLogger(VectorStoreService.class.getName());

    private final EmbeddingModel qwenEmbeddingModel;
    private final EmbeddingStore<TextSegment> embeddingStore;
    private final ObjectMapper objectMapper;
    private final DocumentSplitter documentSplitter;

    /**
     * Creates the service with its collaborators injected by Spring.
     *
     * @param qwenEmbeddingModel model used to turn text segments into embeddings
     * @param embeddingStore     vector store that persists embedding/segment pairs
     * @param objectMapper       Jackson mapper used to serialize JSON payloads to text
     */
    @Autowired
    public VectorStoreService(EmbeddingModel qwenEmbeddingModel,
                              EmbeddingStore<TextSegment> embeddingStore,
                              ObjectMapper objectMapper) {
        this.qwenEmbeddingModel = qwenEmbeddingModel;
        this.embeddingStore = embeddingStore;
        this.objectMapper = objectMapper;
        // Line-based splitter: segments of at most 100 characters with a 20-character
        // overlap between consecutive segments (per DocumentByLineSplitter's contract).
        this.documentSplitter = new DocumentByLineSplitter(100, 20);
    }

    /**
     * Serializes a JSON object to text, embeds it, and stores the vectors.
     *
     * @param jsonData JSON data to vectorize; {@code null} is treated as empty input
     * @return number of text segments that were embedded and stored (0 for null input)
     * @throws RuntimeException if serialization, embedding, or storage fails;
     *                          the original exception is preserved as the cause
     */
    public int addJsonToVectorStore(Map<String, Object> jsonData) {
        if (jsonData == null) {
            return 0;
        }
        try {
            // Serialize to a single JSON string; the splitter then chunks it by line.
            String jsonText = objectMapper.writeValueAsString(jsonData);
            int segmentCount = embedAndStore(Document.from(jsonText));
            LOG.info(String.format("JSON数据向量化完成：%d 个段落", segmentCount));
            return segmentCount;
        } catch (Exception e) {
            // Wrap at the boundary, keeping the cause; callers catch RuntimeException.
            LOG.log(Level.SEVERE, "JSON向量化失败: " + e.getMessage(), e);
            throw new RuntimeException("向量化处理失败", e);
        }
    }

    /**
     * Vectorizes each JSON object in the list and stores the results.
     *
     * @param jsonDataList list of JSON objects; {@code null} is treated as empty
     * @return total number of segments stored across all objects
     * @throws RuntimeException if any single object fails to vectorize
     */
    public int addJsonListToVectorStore(List<Map<String, Object>> jsonDataList) {
        if (jsonDataList == null) {
            return 0;
        }
        int totalSegments = 0;
        for (Map<String, Object> jsonData : jsonDataList) {
            totalSegments += addJsonToVectorStore(jsonData);
        }
        return totalSegments;
    }

    /**
     * Vectorizes raw text content and stores it in the vector store.
     *
     * @param textContent text to vectorize; {@code null} or blank yields 0
     * @return number of text segments that were embedded and stored
     * @throws RuntimeException if embedding or storage fails; cause preserved
     */
    public int addTextToVectorStore(String textContent) {
        // Document.from(null) would throw an uninformative NPE; treat blank as no-op.
        if (textContent == null || textContent.isBlank()) {
            return 0;
        }
        try {
            int segmentCount = embedAndStore(Document.from(textContent));
            LOG.info(String.format("文本向量化完成：%d 个段落", segmentCount));
            return segmentCount;
        } catch (Exception e) {
            LOG.log(Level.SEVERE, "文本向量化失败: " + e.getMessage(), e);
            throw new RuntimeException("向量化处理失败", e);
        }
    }

    /**
     * Shared pipeline: split the document, embed all segments, store the pairs.
     *
     * @param document document to process
     * @return number of segments produced (and therefore embeddings stored)
     */
    private int embedAndStore(Document document) {
        List<TextSegment> segments = documentSplitter.split(document);
        List<Embedding> embeddings = qwenEmbeddingModel.embedAll(segments).content();
        // addAll pairs embeddings with their source segments positionally.
        embeddingStore.addAll(embeddings, segments);
        return segments.size();
    }
}