package com.example.deepseek.service.impl;

import com.example.deepseek.model.DocumentChunk;
import com.example.deepseek.service.VectorStoreService;
import lombok.extern.slf4j.Slf4j;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

/**
 * In-memory {@link VectorStoreService} implementation combining brute-force
 * cosine-similarity search over stored embeddings with a Lucene (BM25) keyword
 * index, plus a weighted hybrid of the two.
 *
 * <p>All state lives in memory (concurrent maps and a {@link ByteBuffersDirectory}),
 * so the store is empty after every restart.
 *
 * <p>NOTE(review): the {@code id} field is indexed as a tokenized {@link TextField},
 * so Lucene documents cannot be replaced by an exact id term; re-adding an existing
 * id duplicates the Lucene document. {@link #searchByKeyword(String, int)}
 * de-duplicates results by id to compensate. A proper fix would index the id as a
 * {@code StringField} and use {@code IndexWriter.updateDocument(Term, ...)}.
 */
@Service
@Slf4j
public class InMemoryVectorStoreServiceImpl implements VectorStoreService {

    /** Maximum length of a keyword query after escaping; longer queries are truncated. */
    private static final int MAX_QUERY_LENGTH = 1000;

    /** Relative weight of the cosine-similarity score in hybrid search. */
    private static final double VECTOR_WEIGHT = 0.7;

    /** Relative weight of the BM25 keyword score in hybrid search. */
    private static final double KEYWORD_WEIGHT = 0.3;

    // Document id -> embedding vector
    private final Map<String, float[]> embeddingMap = new ConcurrentHashMap<>();

    // Document id -> raw text content
    private final Map<String, String> contentMap = new ConcurrentHashMap<>();

    // Document id -> metadata
    private final Map<String, Map<String, Object>> metadataMap = new ConcurrentHashMap<>();

    // In-memory Lucene directory backing the keyword (BM25) index
    private Directory directory;

    // Analyzer shared by indexing and query parsing (must be the same for BM25 to match)
    private StandardAnalyzer analyzer;

    // Single long-lived writer; Lucene allows only one writer per directory
    private IndexWriter indexWriter;

    /**
     * Initializes the in-memory Lucene index.
     *
     * @throws IOException if the index writer cannot be created
     */
    @PostConstruct
    public void init() throws IOException {
        directory = new ByteBuffersDirectory();
        analyzer = new StandardAnalyzer();
        IndexWriterConfig config = new IndexWriterConfig(analyzer);
        indexWriter = new IndexWriter(directory, config);
    }

    /**
     * Releases Lucene resources on shutdown.
     *
     * <p>Closes are chained with try/finally so a failure closing one resource
     * does not leak the remaining ones (the original code stopped at the first
     * throwing {@code close()}).
     *
     * @throws IOException if closing the writer or directory fails
     */
    @PreDestroy
    public void cleanup() throws IOException {
        try {
            if (indexWriter != null) {
                indexWriter.close();
            }
        } finally {
            try {
                if (directory != null) {
                    directory.close();
                }
            } finally {
                if (analyzer != null) {
                    analyzer.close();
                }
            }
        }
    }

    /**
     * Adds (or overwrites, for the in-memory maps) a single document and commits
     * the Lucene index.
     *
     * @param id        unique document id (must not be null)
     * @param content   document text (must not be null)
     * @param embedding embedding vector (must not be null)
     * @param metadata  optional metadata; {@code null} is treated as empty
     * @throws RuntimeException wrapping any {@link IOException} from Lucene
     */
    @Override
    public void addDocument(String id, String content, float[] embedding, Map<String, Object> metadata) {
        Objects.requireNonNull(id, "id");
        Objects.requireNonNull(content, "content");
        Objects.requireNonNull(embedding, "embedding");
        // Tolerate null metadata instead of failing inside ConcurrentHashMap.put
        Map<String, Object> safeMetadata = (metadata != null) ? metadata : Collections.emptyMap();
        try {
            storeInMemory(id, content, embedding, safeMetadata);
            indexWriter.addDocument(buildLuceneDocument(id, content, safeMetadata));
            indexWriter.commit();
        } catch (IOException e) {
            throw new RuntimeException("Error adding document to index", e);
        }
    }

    /**
     * Adds a batch of documents with a single Lucene commit at the end.
     *
     * @param ids        document ids
     * @param contents   document texts, parallel to {@code ids}
     * @param embeddings embedding vectors, parallel to {@code ids}
     * @param metadatas  metadata maps, parallel to {@code ids}
     * @throws IllegalArgumentException if the four lists differ in size
     * @throws RuntimeException         wrapping any {@link IOException} from Lucene
     */
    @Override
    public void addDocuments(List<String> ids, List<String> contents, List<float[]> embeddings, 
                           List<Map<String, Object>> metadatas) {
        // Validate up front: the original code would fail mid-loop on mismatched
        // sizes, leaving the store partially updated.
        int count = ids.size();
        if (contents.size() != count || embeddings.size() != count || metadatas.size() != count) {
            throw new IllegalArgumentException(
                    "Parallel lists must have the same size: ids=" + count
                            + ", contents=" + contents.size()
                            + ", embeddings=" + embeddings.size()
                            + ", metadatas=" + metadatas.size());
        }
        try {
            for (int i = 0; i < count; i++) {
                String id = ids.get(i);
                String content = contents.get(i);
                Map<String, Object> metadata = metadatas.get(i);
                Map<String, Object> safeMetadata = (metadata != null) ? metadata : Collections.emptyMap();

                storeInMemory(id, content, embeddings.get(i), safeMetadata);
                indexWriter.addDocument(buildLuceneDocument(id, content, safeMetadata));
            }
            // Single commit for the whole batch
            indexWriter.commit();
        } catch (IOException e) {
            throw new RuntimeException("Error adding documents to index", e);
        }
    }

    /** Stores the raw document data in the in-memory maps. */
    private void storeInMemory(String id, String content, float[] embedding, Map<String, Object> metadata) {
        embeddingMap.put(id, embedding);
        contentMap.put(id, content);
        metadataMap.put(id, metadata);
    }

    /** Builds the Lucene document: id, content, and one "metadata_*" field per metadata entry. */
    private Document buildLuceneDocument(String id, String content, Map<String, Object> metadata) {
        Document document = new Document();
        document.add(new TextField("id", id, Field.Store.YES));
        document.add(new TextField("content", content, Field.Store.YES));
        for (Map.Entry<String, Object> entry : metadata.entrySet()) {
            document.add(new TextField("metadata_" + entry.getKey(),
                    String.valueOf(entry.getValue()), Field.Store.YES));
        }
        return document;
    }

    /**
     * Brute-force nearest-neighbor search: scores every stored embedding by cosine
     * similarity against the query and returns the top {@code limit} chunks,
     * highest similarity first. O(n * d) over the whole store.
     *
     * @param queryEmbedding query vector
     * @param limit          maximum number of results
     * @return chunks ordered by descending similarity
     */
    @Override
    public List<DocumentChunk> searchSimilar(float[] queryEmbedding, int limit) {
        List<ScoredDocument> scoredDocuments = new ArrayList<>(embeddingMap.size());

        for (Map.Entry<String, float[]> entry : embeddingMap.entrySet()) {
            float similarity = cosineSimilarity(queryEmbedding, entry.getValue());
            scoredDocuments.add(new ScoredDocument(entry.getKey(), similarity));
        }

        // Sort by similarity, best first
        scoredDocuments.sort((a, b) -> Float.compare(b.score, a.score));

        return scoredDocuments.stream()
                .limit(limit)
                .map(doc -> new DocumentChunk(
                        doc.id,
                        contentMap.get(doc.id),
                        metadataMap.get(doc.id),
                        doc.score))
                .collect(Collectors.toList());
    }

    /**
     * BM25 keyword search over the Lucene index.
     *
     * <p>The query is escaped (user input cannot break the parser), capped at
     * {@link #MAX_QUERY_LENGTH} characters, and results are de-duplicated by id
     * because re-adding a document leaves duplicate Lucene entries (see class note).
     *
     * @param query free-text query; blank/null yields an empty result
     * @param limit maximum number of results
     * @return matching chunks in Lucene score order; empty list on any failure
     */
    @Override
    public List<DocumentChunk> searchByKeyword(String query, int limit) {
        // Guard trivial inputs before touching the parser (parse("") would throw)
        if (query == null || query.trim().isEmpty() || limit <= 0) {
            return new ArrayList<>();
        }
        try (IndexReader reader = DirectoryReader.open(directory)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            QueryParser parser = new QueryParser("content", analyzer);

            // Escape Lucene query syntax in user input
            String escapedQuery = QueryParser.escape(query);
            if (escapedQuery.length() > MAX_QUERY_LENGTH) {
                escapedQuery = escapedQuery.substring(0, MAX_QUERY_LENGTH);
            }

            Query luceneQuery = parser.parse(escapedQuery);
            TopDocs topDocs = searcher.search(luceneQuery, limit);

            // De-duplicate by id: a re-added document exists twice in the index
            Set<String> seenIds = new HashSet<>();
            List<DocumentChunk> results = new ArrayList<>();
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                Document doc = searcher.doc(scoreDoc.doc);
                String id = doc.get("id");
                if (id == null || !seenIds.add(id)) {
                    continue;
                }
                results.add(new DocumentChunk(
                        id,
                        contentMap.get(id),
                        metadataMap.get(id),
                        scoreDoc.score));
            }

            return results;
        } catch (Exception e) {
            log.error("Error in keyword search: {}", e.getMessage(), e);
            return new ArrayList<>();
        }
    }

    /**
     * Hybrid retrieval: runs both vector and keyword search (over-fetching
     * {@code limit * 2} candidates each), merges by id with a 0.7/0.3 weighting,
     * and returns the top {@code limit} chunks by combined score.
     *
     * <p>NOTE(review): cosine similarity lies in [-1, 1] while BM25 scores are
     * unbounded, so the fixed weights mix different scales; consider normalizing
     * both score distributions before combining.
     *
     * @param query          keyword query
     * @param queryEmbedding query embedding
     * @param limit          maximum number of results
     * @return chunks ordered by descending combined score; falls back to pure
     *         vector search (then to an empty list) on failure
     */
    @Override
    public List<DocumentChunk> hybridSearch(String query, float[] queryEmbedding, int limit) {
        try {
            // Over-fetch so the merged list still has enough candidates after overlap
            List<DocumentChunk> vectorResults = searchSimilar(queryEmbedding, limit * 2);
            List<DocumentChunk> keywordResults = searchByKeyword(query, limit * 2);

            Map<String, DocumentChunk> combinedResults = new HashMap<>();

            // Vector results, weighted
            for (DocumentChunk chunk : vectorResults) {
                chunk.setScore(chunk.getScore() * VECTOR_WEIGHT);
                combinedResults.put(chunk.getId(), chunk);
            }

            // Keyword results, weighted; add to the existing score when both found the id
            for (DocumentChunk chunk : keywordResults) {
                DocumentChunk existing = combinedResults.get(chunk.getId());
                if (existing != null) {
                    existing.setScore(existing.getScore() + chunk.getScore() * KEYWORD_WEIGHT);
                } else {
                    chunk.setScore(chunk.getScore() * KEYWORD_WEIGHT);
                    combinedResults.put(chunk.getId(), chunk);
                }
            }

            return combinedResults.values().stream()
                    .sorted((a, b) -> Double.compare(b.getScore(), a.getScore()))
                    .limit(limit)
                    .collect(Collectors.toList());

        } catch (Exception e) {
            log.error("Error in hybrid search: {}", e.getMessage(), e);
            // Degrade gracefully to pure vector search rather than failing the request
            try {
                return searchSimilar(queryEmbedding, limit);
            } catch (Exception ve) {
                // Pass the exception object so the stack trace is not lost
                log.error("Fallback to vector search also failed: {}", ve.getMessage(), ve);
                return new ArrayList<>();
            }
        }
    }

    /**
     * Removes every document from both the in-memory maps and the Lucene index.
     *
     * @throws RuntimeException wrapping any {@link IOException} from Lucene
     */
    @Override
    public void clearAll() {
        try {
            embeddingMap.clear();
            contentMap.clear();
            metadataMap.clear();

            indexWriter.deleteAll();
            indexWriter.commit();

        } catch (IOException e) {
            throw new RuntimeException("Error clearing vector store", e);
        }
    }

    /** Returns the number of stored documents (size of the embedding map). */
    @Override
    public int getDocumentCount() {
        return embeddingMap.size();
    }

    /**
     * Cosine similarity of two vectors.
     *
     * <p>Mismatched dimensions previously caused an
     * {@link ArrayIndexOutOfBoundsException} (or a silently wrong partial score);
     * they are now reported and scored as 0 so one bad vector cannot break a search.
     *
     * @return similarity in [-1, 1]; 0 if either norm is zero or dimensions differ
     */
    private float cosineSimilarity(float[] vectorA, float[] vectorB) {
        if (vectorA.length != vectorB.length) {
            log.warn("Embedding dimension mismatch: {} vs {}", vectorA.length, vectorB.length);
            return 0f;
        }

        float dotProduct = 0.0f;
        float normA = 0.0f;
        float normB = 0.0f;

        for (int i = 0; i < vectorA.length; i++) {
            dotProduct += vectorA[i] * vectorB[i];
            normA += vectorA[i] * vectorA[i];
            normB += vectorB[i] * vectorB[i];
        }

        if (normA == 0 || normB == 0) {
            return 0;
        }

        return dotProduct / (float) (Math.sqrt(normA) * Math.sqrt(normB));
    }

    /** Immutable pair of a document id and its similarity score. */
    private static class ScoredDocument {
        final String id;
        final float score;

        ScoredDocument(String id, float score) {
            this.id = id;
            this.score = score;
        }
    }
}