package com.example.apitesttool.service;

import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

/**
 * In-memory vector database service.
 *
 * <p>Vectorizes documents with an {@link EmbeddingModel}, stores the resulting
 * chunk embeddings in memory, and answers semantic queries by cosine
 * similarity. Nothing is persisted; all data is lost on restart.
 *
 * <p>Thread-safety: the backing maps are {@link ConcurrentHashMap}s, so
 * individual add/remove/search operations are safe to call concurrently.
 */
@Service
public class VectorDatabaseService {

    private static final Logger logger = LoggerFactory.getLogger(VectorDatabaseService.class);

    /** Character budget per chunk used by {@link #addDocument}. */
    private static final int DEFAULT_CHUNK_SIZE = 500;

    private final EmbeddingModel embeddingModel;
    /** chunkId -> vectorized chunk. */
    private final Map<String, VectorDocument> vectorStore;
    /** documentId -> ordered list of chunk contents (index i maps to "{docId}_chunk_{i}"). */
    private final Map<String, List<String>> documentChunks;

    /**
     * Creates the service backed by OpenAI's "text-embedding-ada-002" model,
     * reading the API key from the {@code OPENAI_API_KEY} environment variable.
     * This is the constructor Spring uses.
     */
    public VectorDatabaseService() {
        this(OpenAiEmbeddingModel.builder()
                .apiKey(System.getenv("OPENAI_API_KEY"))
                .modelName("text-embedding-ada-002")
                .build());
    }

    /**
     * Creates the service with a caller-supplied embedding model.
     * Useful for tests or alternative embedding providers.
     *
     * @param embeddingModel model used to embed chunks and queries; must not be null
     */
    public VectorDatabaseService(EmbeddingModel embeddingModel) {
        this.embeddingModel = Objects.requireNonNull(embeddingModel, "embeddingModel");
        this.vectorStore = new ConcurrentHashMap<>();
        this.documentChunks = new ConcurrentHashMap<>();
        logger.info("内存向量数据库服务初始化完成");
    }

    /**
     * Immutable vectorized chunk: id, raw text, its embedding, and metadata.
     */
    public static class VectorDocument {
        private final String id;
        private final String content;
        private final Embedding embedding;
        private final Map<String, Object> metadata;

        /**
         * @param id        unique chunk id
         * @param content   raw chunk text
         * @param embedding embedding vector for {@code content}
         * @param metadata  optional metadata; null is treated as empty.
         *                  A defensive copy is taken so later caller-side
         *                  mutation cannot corrupt the store.
         */
        public VectorDocument(String id, String content, Embedding embedding, Map<String, Object> metadata) {
            this.id = id;
            this.content = content;
            this.embedding = embedding;
            this.metadata = metadata != null ? new HashMap<>(metadata) : new HashMap<>();
        }

        public String getId() { return id; }
        public String getContent() { return content; }
        public Embedding getEmbedding() { return embedding; }
        public Map<String, Object> getMetadata() { return metadata; }
    }

    /**
     * A single search hit: the matched chunk and its cosine similarity
     * to the query (higher is more similar; range [-1, 1]).
     */
    public static class SearchResult {
        private final VectorDocument document;
        private final double similarity;

        public SearchResult(VectorDocument document, double similarity) {
            this.document = document;
            this.similarity = similarity;
        }

        public VectorDocument getDocument() { return document; }
        public double getSimilarity() { return similarity; }
    }

    /**
     * Splits {@code content} into chunks, embeds each chunk, and stores the
     * resulting vectors. Re-adding the same {@code documentId} overwrites its
     * previous chunks with the same indices (stale higher-index chunks from a
     * longer previous version are NOT removed — call {@link #removeDocument}
     * first if that matters).
     *
     * @param documentId unique document identifier
     * @param content    full document text; must not be null
     * @param metadata   optional metadata copied onto every chunk; may be null
     * @throws RuntimeException if embedding any chunk fails
     */
    public void addDocument(String documentId, String content, Map<String, Object> metadata) {
        Objects.requireNonNull(documentId, "documentId");
        Objects.requireNonNull(content, "content");
        try {
            // Split long documents into embedding-friendly chunks.
            List<String> chunks = chunkText(content, DEFAULT_CHUNK_SIZE);
            documentChunks.put(documentId, chunks);

            for (int i = 0; i < chunks.size(); i++) {
                String chunkId = documentId + "_chunk_" + i;
                String chunkContent = chunks.get(i);

                // Remote embedding call — may throw; handled below.
                Embedding embedding = embeddingModel.embed(chunkContent).content();

                // Null-safe copy: the original code NPE'd here on null metadata.
                Map<String, Object> chunkMetadata =
                        metadata != null ? new HashMap<>(metadata) : new HashMap<>();
                chunkMetadata.put("documentId", documentId);
                chunkMetadata.put("chunkIndex", i);
                chunkMetadata.put("totalChunks", chunks.size());

                vectorStore.put(chunkId, new VectorDocument(chunkId, chunkContent, embedding, chunkMetadata));
            }

            logger.info("文档 {} 已向量化并存储，共 {} 个分块", documentId, chunks.size());
        } catch (Exception e) {
            logger.error("向量化文档失败: {}", documentId, e);
            throw new RuntimeException("向量化文档失败: " + e.getMessage(), e);
        }
    }

    /**
     * Semantic search: embeds {@code query} and returns the {@code topK}
     * stored chunks with the highest cosine similarity, best first.
     *
     * @param query natural-language query
     * @param topK  maximum number of results; non-positive yields an empty list
     * @return best-matching chunks, or an empty list on failure / empty store
     */
    public List<SearchResult> search(String query, int topK) {
        // Guard cheap failure modes before the (remote) embedding call.
        // Previously a negative topK made limit() throw, which was silently
        // swallowed into an empty list after a wasted API call.
        if (topK <= 0 || query == null || query.isBlank() || vectorStore.isEmpty()) {
            return Collections.emptyList();
        }
        try {
            Embedding queryEmbedding = embeddingModel.embed(query).content();

            // Score every stored chunk, sort by descending similarity, keep topK.
            List<SearchResult> results = vectorStore.values().stream()
                    .map(doc -> new SearchResult(doc, cosineSimilarity(queryEmbedding, doc.getEmbedding())))
                    .sorted(Comparator.comparingDouble(SearchResult::getSimilarity).reversed())
                    .limit(topK)
                    .collect(Collectors.toList());

            logger.debug("语义搜索查询: '{}', 返回 {} 个结果", query, results.size());
            return results;
        } catch (Exception e) {
            // Search is best-effort: degrade to no results rather than propagate.
            logger.error("语义搜索失败", e);
            return Collections.emptyList();
        }
    }

    /**
     * Removes a document and all of its chunk vectors. No-op if the
     * document is unknown.
     *
     * @param documentId identifier previously passed to {@link #addDocument}
     */
    public void removeDocument(String documentId) {
        List<String> chunks = documentChunks.remove(documentId);
        if (chunks != null) {
            for (int i = 0; i < chunks.size(); i++) {
                vectorStore.remove(documentId + "_chunk_" + i);
            }
            logger.info("已删除文档 {} 的向量数据", documentId);
        }
    }

    /**
     * Returns store statistics: {@code totalDocuments}, {@code totalChunks},
     * and {@code embeddingDimension} (0 when the store is empty).
     */
    public Map<String, Object> getStats() {
        Map<String, Object> stats = new HashMap<>();
        stats.put("totalDocuments", documentChunks.size());
        stats.put("totalChunks", vectorStore.size());
        stats.put("embeddingDimension", vectorStore.isEmpty() ? 0 :
                vectorStore.values().iterator().next().getEmbedding().dimension());
        return stats;
    }

    /**
     * Removes all documents and vectors.
     */
    public void clear() {
        vectorStore.clear();
        documentChunks.clear();
        logger.info("向量数据库已清空");
    }

    /**
     * Splits text into chunks of at most roughly {@code maxChunkSize}
     * characters, breaking on sentence boundaries ({@code . ! ?} followed by
     * whitespace). A single sentence longer than the budget becomes its own
     * oversized chunk.
     *
     * @return at least one chunk; the full text if no sentence split applies
     */
    private List<String> chunkText(String text, int maxChunkSize) {
        List<String> chunks = new ArrayList<>();
        // Split after sentence-ending punctuation, keeping the punctuation.
        String[] sentences = text.split("(?<=[.!?])\\s+");

        StringBuilder currentChunk = new StringBuilder();
        for (String sentence : sentences) {
            // Flush the current chunk before it would overflow the budget.
            if (currentChunk.length() + sentence.length() > maxChunkSize && currentChunk.length() > 0) {
                chunks.add(currentChunk.toString().trim());
                currentChunk = new StringBuilder();
            }
            currentChunk.append(sentence).append(" ");
        }

        if (currentChunk.length() > 0) {
            chunks.add(currentChunk.toString().trim());
        }

        return chunks.isEmpty() ? List.of(text) : chunks;
    }

    /**
     * Cosine similarity between two embeddings.
     *
     * @return similarity in [-1, 1]; 0.0 when either vector has zero norm
     *         (previously this divided by zero and returned NaN, which
     *         poisoned the sort order in {@link #search})
     * @throws IllegalArgumentException if the vectors differ in dimension
     */
    private double cosineSimilarity(Embedding a, Embedding b) {
        float[] vectorA = a.vector();
        float[] vectorB = b.vector();

        if (vectorA.length != vectorB.length) {
            throw new IllegalArgumentException("向量维度不匹配");
        }

        double dotProduct = 0.0;
        double normA = 0.0;
        double normB = 0.0;

        for (int i = 0; i < vectorA.length; i++) {
            dotProduct += vectorA[i] * vectorB[i];
            normA += vectorA[i] * vectorA[i];
            normB += vectorB[i] * vectorB[i];
        }

        double denominator = Math.sqrt(normA) * Math.sqrt(normB);
        return denominator == 0.0 ? 0.0 : dotProduct / denominator;
    }
}

