package com.zzh.partnersys.ai.util;

import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.util.StrUtil;
import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch._types.Result;
import co.elastic.clients.elasticsearch._types.query_dsl.Query;
import co.elastic.clients.elasticsearch.core.*;
import co.elastic.clients.elasticsearch.core.bulk.BulkOperation;
import co.elastic.clients.elasticsearch.core.bulk.DeleteOperation;
import co.elastic.clients.elasticsearch.core.bulk.IndexOperation;
import co.elastic.clients.elasticsearch.core.knn_search.KnnSearchQuery;
import co.elastic.clients.elasticsearch.core.search.Hit;
import com.zzh.partnersys.ai.entity.dto.AIDocumentDTO;
import com.zzh.partnersys.common.constant.ElasticSearchConstant;
import com.zzh.partnersys.common.exception.BusinessException;
import jakarta.annotation.Resource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;

/**
 * @author: zzh
 * @date: 2025/11/11 23:00:04
 * @version: 1.0
 */
@Component
public class ESRestClientUtil {

    private static final Logger logger = LoggerFactory.getLogger(ESRestClientUtil.class);

    /** Business error code attached to every ES failure raised by this utility. */
    private static final int ES_ERROR_CODE = 40000;

    /** Candidate pool size per shard for approximate kNN search. */
    private static final int KNN_NUM_CANDIDATES = 100;

    @Resource
    private ElasticsearchClient elasticsearchClient;

    /**
     * Inserts (or upserts) a single vector document.
     *
     * @param documentDTO document to index; its id is used as the ES document id
     * @param indexName   target index name
     * @throws BusinessException if the ES request fails with an I/O error
     */
    public void insertVectorDocument(AIDocumentDTO documentDTO, String indexName) {
        IndexResponse response;
        try {
            response = elasticsearchClient.index(request -> request
                    .index(indexName)
                    .id(documentDTO.getId())
                    .document(documentDTO));
        } catch (IOException e) {
            logger.error("向ES插入向量数据失败", e);
            throw new BusinessException("向ES插入向量数据失败", ES_ERROR_CODE);
        }
        // Compare against the typed Result enum instead of fragile string matching.
        boolean success = response.result() == Result.Created || response.result() == Result.Updated;
        if (success) {
            logger.info("文档索引成功: 索引={}, ID={}", indexName, documentDTO.getId());
        } else {
            logger.warn("文档索引异常: 索引={}, ID={}, 结果={}", indexName, documentDTO.getId(), response.result().name());
        }
    }

    /**
     * Bulk-inserts vector documents into the given index.
     * Documents that cannot be turned into an index operation are skipped (logged);
     * if any item of the bulk response reports an error, details are logged and a
     * BusinessException is thrown.
     *
     * @param documentDTOList documents to index; no-op when null/empty
     * @param indexName       target index name
     * @throws BusinessException if the bulk request fails or any item is rejected
     */
    public void insertVectorBatchDocument(List<AIDocumentDTO> documentDTOList, String indexName) {
        if (CollUtil.isEmpty(documentDTOList)) {
            logger.warn("待插入的向量数据为空");
            return;
        }

        // 1. Build one BulkRequest containing an index operation per document.
        BulkRequest bulkRequest = BulkRequest.of(b -> {
            for (AIDocumentDTO doc : documentDTOList) {
                try {
                    IndexOperation<AIDocumentDTO> indexOp = IndexOperation.of(i -> i
                            .index(indexName)
                            .id(doc.getId())   // explicit id; ES would auto-generate otherwise
                            .document(doc));   // the DTO must be JSON-serializable
                    b.operations(BulkOperation.of(o -> o.index(indexOp)));
                } catch (Exception e) {
                    // Best effort: skip documents that fail to build, keep the rest of the batch.
                    logger.error("构建索引操作失败，文档ID={}", doc.getId(), e);
                }
            }
            return b;
        });

        BulkResponse bulkResponse;
        try {
            bulkResponse = elasticsearchClient.bulk(bulkRequest);
        } catch (Exception e) {
            logger.error("向ES批量插入向量数据失败", e);
            throw new BusinessException("向ES批量插入向量数据失败", ES_ERROR_CODE);
        }

        if (bulkResponse.errors()) {
            // At least one item failed; log the per-item details, then fail the whole call.
            logBulkInsertFailures(bulkResponse, documentDTOList, indexName);
            throw new BusinessException("批量插入 ES 失败，请查看日志了解详情", ES_ERROR_CODE);
        }
        logger.info("批量插入 ES 成功：索引={}，文档数量={}", indexName, documentDTOList.size());
    }

    /** Logs per-item failure details of a bulk-insert response that reported errors. */
    private void logBulkInsertFailures(BulkResponse bulkResponse, List<AIDocumentDTO> documentDTOList, String indexName) {
        bulkResponse.items().forEach(item -> {
            if (item.error() == null) {
                return; // this item succeeded
            }
            String errorType = item.error().type() != null ? item.error().type() : "unknown";
            String errorReason = item.error().reason() != null ? item.error().reason() : "unknown";
            String errorCausedBy = item.error().causedBy() != null
                    ? item.error().causedBy().reason() : "none";

            logger.error("批量插入 ES 失败：文档 ID={}，索引={}，错误类型={}，错误原因={}，详细原因={}",
                    item.id(), indexName, errorType, errorReason, errorCausedBy);

            // For parse failures, dump the offending document's key fields to aid debugging.
            if (errorReason.contains("failed to parse")) {
                documentDTOList.stream()
                        .filter(doc -> Objects.equals(doc.getId(), item.id()))
                        .findFirst()
                        .ifPresent(doc -> logger.error(
                                "失败文档详情：ID={}, vectors维度={}, createTime={}, assistantId={}, userId={}",
                                doc.getId(),
                                doc.getVectors() != null ? doc.getVectors().length : 0,
                                doc.getCreateTime(),
                                doc.getAssistantId(),
                                doc.getUserId()));
            }
        });
    }

    /**
     * Deletes a single vector document by id.
     *
     * @param documentId id of the document to delete
     * @param indexName  target index name
     * @throws BusinessException if the ES request fails with an I/O error
     */
    public void deleteVectorDocument(String documentId, String indexName) {
        DeleteResponse response;
        try {
            response = elasticsearchClient.delete(request -> request.index(indexName).id(documentId));
        } catch (IOException e) {
            logger.error("向ES删除向量数据失败", e);
            throw new BusinessException("向ES删除向量数据失败", ES_ERROR_CODE);
        }
        if (response.result() == Result.Deleted) {
            logger.info("文档删除成功: 索引={}, ID={}", indexName, documentId);
        } else {
            // e.g. NotFound — the document did not exist; logged but not treated as fatal.
            logger.warn("文档删除异常: 索引={}, ID={}, 结果={}", indexName, documentId, response.result().name());
        }
    }

    /**
     * Bulk-deletes vector documents by id.
     * Per-item failures are logged; an I/O failure of the whole request throws.
     *
     * @param documentIdList ids of the documents to delete; no-op when null/empty
     * @param indexName      target index name
     * @throws BusinessException if the bulk request fails with an I/O error
     */
    public void deleteVectorBatchDocument(List<String> documentIdList, String indexName) {
        if (CollUtil.isEmpty(documentIdList)) {
            logger.warn("待删除的向量数据ID列表为空");
            return;
        }
        // One delete operation per id, all in a single bulk request.
        BulkRequest bulkRequest = BulkRequest.of(b -> {
            for (String documentId : documentIdList) {
                DeleteOperation deleteOp = DeleteOperation.of(d -> d.index(indexName).id(documentId));
                b.operations(BulkOperation.of(o -> o.delete(deleteOp)));
            }
            return b;
        });
        try {
            BulkResponse bulkResponse = elasticsearchClient.bulk(bulkRequest);
            if (bulkResponse.errors()) {
                // Log each failed item (document id + reason); unlike inserts, deletes stay best-effort.
                bulkResponse.items().forEach(item -> {
                    if (item.error() != null) {
                        logger.error("批量删除 ES 失败：文档 ID={}，索引={}，错误原因={}",
                                item.id(), indexName, item.error().reason());
                    }
                });
            } else {
                logger.info("批量删除 ES 成功：索引={}，文档数量={}", indexName, documentIdList.size());
            }
        } catch (IOException e) {
            logger.error("向ES批量删除向量数据失败", e);
            throw new BusinessException("向ES批量删除向量数据失败", ES_ERROR_CODE);
        }
    }

    /**
     * Approximate kNN search over the vector field, optionally restricted to a
     * user and/or assistant. The heavy {@code vectors} field is excluded from the
     * returned documents.
     *
     * @param queryVector         query embedding
     * @param indexName           index to search
     * @param topK                number of nearest neighbours to return
     * @param userId              restrict to this user's documents (nullable)
     * @param assistantId         restrict to this assistant's documents (nullable)
     * @param similarityThreshold minimum score — currently NOT applied, see note below
     * @return matched documents (without vectors); empty list for blank/empty input
     * @throws BusinessException if the ES request fails with an I/O error
     */
    public List<AIDocumentDTO> knnVectorSearch(List<Double> queryVector, String indexName, int topK,
                                               Long userId, Long assistantId, double similarityThreshold) {
        // Guard: without an index name or a query vector there is nothing to search.
        if (StrUtil.isEmpty(indexName) || CollUtil.isEmpty(queryVector)) {
            return List.of();
        }

        KnnSearchQuery knnQuery = new KnnSearchQuery.Builder()
                .field(ElasticSearchConstant.VECTOR_FIELD_NAME)
                .queryVector(queryVector)
                .numCandidates(KNN_NUM_CANDIDATES)
                .k(topK)
                .build();

        // Ownership filter: both ids, one id, or no filtering at all.
        final Query filterQuery = buildOwnerFilter(userId, assistantId);

        try {
            KnnSearchResponse<AIDocumentDTO> searchResponse = elasticsearchClient
                    .knnSearch(request -> {
                        var builder = request.index(indexName).knn(knnQuery);
                        if (filterQuery != null) {
                            builder.filter(filterQuery);
                        }
                        // Return only the fields we need and exclude the vectors field
                        // to reduce network transfer and memory usage.
                        builder.source(s -> s
                                .filter(f -> f
                                        .includes("id", "content", "docName", "docUrl", "docType",
                                                "chunkIndex", "totalChunks", "createTime",
                                                "assistantId", "userId")
                                        .excludes("vectors")
                                )
                        );
                        return builder;
                    }, AIDocumentDTO.class);

            List<Hit<AIDocumentDTO>> hits = searchResponse.hits().hits();
            // NOTE(review): similarityThreshold is deliberately not applied yet — the original
            // TODO defers score filtering until chunk splitting is tuned. Re-enable with:
            // hits = hits.stream().filter(h -> h.score() != null && similarityThreshold <= h.score()).toList();
            return hits.stream()
                    .map(Hit::source)
                    .filter(Objects::nonNull) // _source may be absent on a hit
                    .collect(Collectors.toList());
        } catch (IOException e) {
            // Consistent with the rest of this class: log, then wrap in BusinessException
            // (unchecked), instead of a bare RuntimeException.
            logger.error("ES向量搜索失败: 索引={}", indexName, e);
            throw new BusinessException("ES向量搜索失败", ES_ERROR_CODE);
        }
    }

    /**
     * Builds the ownership filter restricting hits to a user and/or assistant.
     *
     * @return a term/bool query, or {@code null} when neither id is supplied
     */
    private Query buildOwnerFilter(Long userId, Long assistantId) {
        if (assistantId != null && userId != null) {
            return Query.of(q -> q.bool(b -> b
                    .filter(f -> f.term(t -> t.field("assistantId").value(assistantId)))
                    .filter(f -> f.term(t -> t.field("userId").value(userId)))
            ));
        }
        if (assistantId != null) {
            return Query.of(q -> q.term(t -> t.field("assistantId").value(assistantId)));
        }
        if (userId != null) {
            return Query.of(q -> q.term(t -> t.field("userId").value(userId)));
        }
        return null;
    }
}
