package com.wfh.mianshiji.ai.rag;

import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.document.Document;
import org.springframework.ai.embedding.EmbeddingModel;
import org.springframework.ai.vectorstore.VectorStore;
import org.springframework.ai.vectorstore.pgvector.PgVectorStore;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.util.StopWatch;

import java.util.*;
import java.util.stream.Collectors;

import static org.springframework.ai.vectorstore.pgvector.PgVectorStore.PgDistanceType.COSINE_DISTANCE;
import static org.springframework.ai.vectorstore.pgvector.PgVectorStore.PgIndexType.HNSW;

/**
 * PgVector vector store configuration.
 *
 * <p>Creates the primary {@link VectorStore} bean backed by PostgreSQL/pgvector and performs an
 * incremental synchronization of the markdown knowledge base at startup:
 * <ul>
 *   <li>chunks whose source file was removed are deleted;</li>
 *   <li>chunks whose source file's content hash changed are deleted and re-embedded;</li>
 *   <li>chunks of unchanged files are left untouched, avoiding needless embedding-API calls.</li>
 * </ul>
 *
 * @author fenghuanwang
 */
@Configuration
@Slf4j
public class PgVectorStoreConfig {

    @Resource
    private ProgramingAppDocLoader programingAppDocLoader;

    /** Embedding dimensionality; must match the output size of the configured embedding model. */
    private static final int VECTOR_DIMENSIONS = 1536;
    private static final String SCHEMA_NAME = "public";
    private static final String TABLE_NAME = "vector_store";
    /**
     * Length of the id prefix that identifies the source file a chunk belongs to.
     * NOTE(review): assumes every chunk id produced by the loader starts with a stable
     * 22-character file identifier — confirm against ProgramingAppDocLoader.
     */
    private static final int FILE_BASE_LENGTH = 22;

    /**
     * Builds the primary vector store bean and runs the incremental document sync.
     *
     * @param jdbcTemplate   JDBC access to the PostgreSQL instance hosting pgvector
     * @param embeddingModel model used to embed document chunks
     * @return the fully initialized, synced vector store
     */
    @Bean
    @Primary
    public VectorStore pgVectorVectorStore(
            @Qualifier("postgresJdbcTemplate") JdbcTemplate jdbcTemplate,
            @Qualifier("dashscopeEmbeddingModel") EmbeddingModel embeddingModel) {

        initializeDatabase(jdbcTemplate);

        PgVectorStore vectorStore = buildVectorStore(jdbcTemplate, embeddingModel);
        performIncrementalSync(vectorStore, jdbcTemplate);

        return vectorStore;
    }

    /**
     * Ensures the pgvector extension and the vector table exist.
     * Extension creation is best-effort (it may require superuser rights or already exist);
     * table creation failure is fatal because nothing else can work without it.
     *
     * @throws RuntimeException if the vector table cannot be created
     */
    private void initializeDatabase(JdbcTemplate jdbcTemplate) {
        try {
            jdbcTemplate.execute("CREATE EXTENSION IF NOT EXISTS vector");
            log.info("✅ PostgreSQL vector extension initialized");
        } catch (Exception e) {
            // Deliberately non-fatal: the extension may already exist or be managed externally.
            log.warn("Vector extension may already exist: {}", e.getMessage());
        }

        // Schema/table names come from trusted compile-time constants, not user input.
        String createTableSql = """
            CREATE TABLE IF NOT EXISTS %s.%s (
                id TEXT PRIMARY KEY,
                content TEXT,
                metadata JSONB,
                embedding VECTOR(%d)
            )
            """.formatted(SCHEMA_NAME, TABLE_NAME, VECTOR_DIMENSIONS);

        try {
            jdbcTemplate.execute(createTableSql);
            log.info("✅ Vector table created or already exists: {}.{}", SCHEMA_NAME, TABLE_NAME);
        } catch (Exception e) {
            throw new RuntimeException("Failed to create vector table", e);
        }
    }

    /**
     * Builds the {@link PgVectorStore} instance over the manually initialized table,
     * using cosine distance and an HNSW index.
     */
    private PgVectorStore buildVectorStore(JdbcTemplate jdbcTemplate, EmbeddingModel embeddingModel) {
        return PgVectorStore.builder(jdbcTemplate, embeddingModel)
                .dimensions(VECTOR_DIMENSIONS)
                .distanceType(COSINE_DISTANCE)
                .indexType(HNSW)
                .idType(PgVectorStore.PgIdType.TEXT)
                .initializeSchema(false) // table structure is created manually in initializeDatabase
                .schemaName(SCHEMA_NAME)
                .vectorTableName(TABLE_NAME)
                .maxDocumentBatchSize(1000)
                .build();
    }

    /**
     * Performs the incremental sync: diffs the on-disk documents against the stored chunks,
     * deletes stale chunks, and embeds only new or changed ones.
     *
     * @throws RuntimeException wrapping any sync failure (startup should fail loudly)
     */
    private void performIncrementalSync(PgVectorStore vectorStore, JdbcTemplate jdbcTemplate) {
        StopWatch stopWatch = new StopWatch("DocumentSync");
        stopWatch.start();

        try {
            log.info("🔄 Starting incremental document sync...");

            List<Document> currentDocs = programingAppDocLoader.loadMds();
            if (currentDocs.isEmpty()) {
                log.warn("⚠️ No source documents found");
                return;
            }

            SyncContext context = buildSyncContext(currentDocs, jdbcTemplate);
            executeSyncOperations(vectorStore, context);

            stopWatch.stop();
            // Report the chunks actually written, not the full document set.
            log.info("✅ Incremental sync completed in {} ms. Added: {}, Deleted: {}",
                    stopWatch.getTotalTimeMillis(), context.docsToAdd().size(), context.idsToDelete().size());

        } catch (Exception e) {
            log.error("💥 Failed to sync documents incrementally", e);
            throw new RuntimeException("Vector store initialization failed", e);
        }
    }

    /**
     * Diffs current documents against stored chunks and decides what to delete and what to add.
     *
     * <p>Previously every current document was re-added (and therefore re-embedded) on each
     * startup; now only chunks belonging to new or hash-changed files are added.
     * NOTE(review): this assumes chunk ids for an unchanged file are stable across restarts —
     * if the chunking strategy changes without the file hash changing, stale chunks would
     * remain; confirm against the loader's id scheme.
     */
    private SyncContext buildSyncContext(List<Document> currentDocs, JdbcTemplate jdbcTemplate) {
        Set<String> currentFileBases = extractFileBases(currentDocs);

        // Load the ids and per-chunk content hashes of everything currently stored.
        List<Map<String, Object>> existingRows = jdbcTemplate.queryForList(
                "SELECT id, metadata->>'hash' as hash FROM %s.%s".formatted(SCHEMA_NAME, TABLE_NAME)
        );

        Map<String, String> existingIdToHash = new HashMap<>();
        Map<String, Set<String>> existingFileBaseToIds = new HashMap<>();

        for (Map<String, Object> row : existingRows) {
            String id = (String) row.get("id");
            existingIdToHash.put(id, (String) row.get("hash"));
            existingFileBaseToIds.computeIfAbsent(extractFileBase(id), k -> new HashSet<>()).add(id);
        }

        // Files removed from the source set: delete all of their chunks.
        List<String> idsToDelete = calculateDocumentsToDelete(currentFileBases, existingFileBaseToIds);

        // Files whose content hash changed: delete their stale chunks and re-add the new ones.
        Set<String> changedFileBases = findChangedFileBases(currentDocs, existingFileBaseToIds, existingIdToHash);
        changedFileBases.forEach(fileBase -> idsToDelete.addAll(existingFileBaseToIds.get(fileBase)));

        // Only embed chunks of new or changed files; unchanged chunks stay as they are.
        List<Document> docsToAdd = currentDocs.stream()
                .filter(doc -> {
                    String fileBase = extractFileBase(doc.getId());
                    return changedFileBases.contains(fileBase)
                            || !existingFileBaseToIds.containsKey(fileBase);
                })
                .collect(Collectors.toList());

        return new SyncContext(docsToAdd, idsToDelete);
    }

    /**
     * Extracts the distinct file-base ids of the given document chunks.
     */
    private Set<String> extractFileBases(List<Document> documents) {
        return documents.stream()
                .map(doc -> extractFileBase(doc.getId()))
                .collect(Collectors.toSet());
    }

    /**
     * Returns the file-identifying prefix of a chunk id (the whole id if it is shorter).
     */
    private String extractFileBase(String documentId) {
        return documentId.length() > FILE_BASE_LENGTH ?
                documentId.substring(0, FILE_BASE_LENGTH) : documentId;
    }

    /**
     * Collects the ids of all chunks whose source file no longer exists in the current set.
     */
    private List<String> calculateDocumentsToDelete(Set<String> currentFileBases,
                                                    Map<String, Set<String>> existingFileBaseToIds) {
        Set<String> toDeleteFileBases = new HashSet<>(existingFileBaseToIds.keySet());
        toDeleteFileBases.removeAll(currentFileBases);

        return toDeleteFileBases.stream()
                .flatMap(fileBase -> existingFileBaseToIds.get(fileBase).stream())
                .collect(Collectors.toList());
    }

    /**
     * Returns the file-bases present both on disk and in the store whose content hash changed.
     *
     * <p>Replaces the former {@code calculateDocumentsToUpdate}, which reported its result by
     * mutating the caller's delete list; returning the set makes the data flow explicit.
     * NOTE(review): assumes all chunks of one file carry the same file-level hash in their
     * metadata — this mirrors the assumption already made by the original comparison logic.
     */
    private Set<String> findChangedFileBases(List<Document> currentDocs,
                                             Map<String, Set<String>> existingFileBaseToIds,
                                             Map<String, String> existingIdToHash) {
        Set<String> candidateFileBases = new HashSet<>(existingFileBaseToIds.keySet());
        candidateFileBases.retainAll(extractFileBases(currentDocs));

        Set<String> changedFileBases = new HashSet<>();
        for (String fileBase : candidateFileBases) {
            findCurrentHashByFileBase(currentDocs, fileBase).ifPresent(currentHash -> {
                boolean hashChanged = existingFileBaseToIds.get(fileBase).stream()
                        .anyMatch(id -> !currentHash.equals(existingIdToHash.get(id)));
                if (hashChanged) {
                    changedFileBases.add(fileBase);
                }
            });
        }
        return changedFileBases;
    }

    /**
     * Finds the content hash of the first current chunk whose id starts with the given file-base.
     */
    private Optional<String> findCurrentHashByFileBase(List<Document> documents, String fileBase) {
        return documents.stream()
                .filter(doc -> doc.getId().startsWith(fileBase))
                .findFirst()
                .map(doc -> (String) doc.getMetadata().get("hash"));
    }

    /**
     * Applies the computed diff to the store: deletions first, then additions.
     */
    private void executeSyncOperations(VectorStore vectorStore, SyncContext context) {
        if (!context.idsToDelete().isEmpty()) {
            log.info("🗑️ Deleting {} outdated document chunks...", context.idsToDelete().size());
            vectorStore.delete(context.idsToDelete());
        }

        if (!context.docsToAdd().isEmpty()) {
            log.info("📥 Adding {} document chunks...", context.docsToAdd().size());
            vectorStore.add(context.docsToAdd());
        }
    }

    /**
     * Result of diffing the on-disk documents against the stored chunks.
     *
     * @param docsToAdd   chunks that must be (re-)embedded and inserted
     * @param idsToDelete ids of stale chunks to remove from the store
     */
    private record SyncContext(List<Document> docsToAdd, List<String> idsToDelete) {
    }
}