package com.knowledge.business.service.impl;

import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.knowledge.business.domain.KnowledgeDocumentSegment;
import com.knowledge.business.service.IKnowledgeDocumentSegmentService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.List;

/**
 * 文档向量化服务实现类
 *
 * @author knowledge
 */
@Service
public class DocumentVectorService {

    private static final Logger log = LoggerFactory.getLogger(DocumentVectorService.class);

    @Autowired
    private IKnowledgeDocumentSegmentService documentSegmentService;

    @Autowired
    private VectorStorageService vectorStorageService;

    /**
     * Vectorizes every unprocessed segment (vector_status = "0") of the given document.
     *
     * <p>Existing vectors for the document are first deleted from Milvus on a
     * best-effort basis (failure to delete is logged as a warning and does not
     * abort the run), then each pending segment is vectorized individually.
     *
     * @param documentId the document whose segments should be vectorized
     */
    public void vectorizeDocumentSegments(String documentId) {
        // Fetch only this document's segments that have not been vectorized yet.
        QueryWrapper<KnowledgeDocumentSegment> queryWrapper = new QueryWrapper<>();
        queryWrapper.eq("document_id", documentId);
        queryWrapper.eq("vector_status", "0"); // "0" = pending / unprocessed
        List<KnowledgeDocumentSegment> segments = documentSegmentService.list(queryWrapper);
        log.info("开始处理文档 {} 的向量化，共有 {} 个分段需要处理", documentId, segments.size());

        // Best-effort cleanup of stale vectors so re-runs do not leave duplicates
        // in Milvus; a delete failure must not prevent re-vectorization.
        try {
            vectorStorageService.deleteDocumentVectors(documentId);
        } catch (Exception e) {
            log.warn("删除文档 {} 在Milvus中的旧向量数据失败: {}", documentId, e.getMessage());
        }

        int[] counts = vectorizeSegments(segments);
        log.info("文档 {} 向量化处理完成，成功: {}，失败: {}", documentId, counts[0], counts[1]);
    }

    /**
     * Vectorizes all unprocessed segments (vector_status = "0") across every document.
     *
     * <p>Unlike {@link #vectorizeDocumentSegments(String)}, this does not purge
     * any existing vectors first — it only processes segments still marked pending.
     */
    public void vectorizeAllPendingSegments() {
        // Fetch all segments that have not been vectorized yet, regardless of document.
        QueryWrapper<KnowledgeDocumentSegment> queryWrapper = new QueryWrapper<>();
        queryWrapper.eq("vector_status", "0"); // "0" = pending / unprocessed

        List<KnowledgeDocumentSegment> segments = documentSegmentService.list(queryWrapper);

        log.info("开始处理所有未向量化的文档分段，共有 {} 个分段需要处理", segments.size());

        int[] counts = vectorizeSegments(segments);
        log.info("所有未向量化的文档分段处理完成，成功: {}，失败: {}", counts[0], counts[1]);
    }

    /**
     * Vectorizes each segment in turn, isolating per-segment failures so one bad
     * segment never aborts the batch.
     *
     * <p>Fixes two counting bugs in the previous duplicated loops: a {@code false}
     * return was either dropped from both counters (per-document path) or counted
     * as a success (all-pending path). A {@code false} or {@code null} return is
     * now always counted as a failure; {@code Boolean.TRUE.equals(...)} avoids an
     * unboxing NPE if the service returns {@code null}.
     *
     * @param segments segments to vectorize (may be empty)
     * @return a two-element array: {@code [successCount, failureCount]};
     *         the two counts always sum to {@code segments.size()}
     */
    private int[] vectorizeSegments(List<KnowledgeDocumentSegment> segments) {
        int successCount = 0;
        int failureCount = 0;

        for (KnowledgeDocumentSegment segment : segments) {
            try {
                if (Boolean.TRUE.equals(documentSegmentService.vectorizeSegmentContent(segment))) {
                    successCount++;
                    log.debug("文档分段 {} 向量化处理成功", segment.getId());
                } else {
                    failureCount++;
                    log.warn("文档分段 {} 向量化处理返回失败", segment.getId());
                }
            } catch (Exception e) {
                failureCount++;
                log.error("文档分段 {} 向量化处理失败: {}", segment.getId(), e.getMessage(), e);
            }
        }

        return new int[] {successCount, failureCount};
    }
}
