package com.ruoyi.admin.service.document.impl;

import com.alibaba.fastjson2.JSONArray;
import com.alibaba.fastjson2.JSONObject;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.conditions.update.UpdateWrapper;
import com.ruoyi.admin.domain.LlmDatasets;
import com.ruoyi.admin.domain.LlmDocumentSegments;
import com.ruoyi.admin.domain.LlmDocuments;
import com.ruoyi.admin.mapper.LlmDatasetsMapper;
import com.ruoyi.admin.mapper.LlmDocumentsMapper;
import com.ruoyi.admin.mapper.LlmDocumentsSegmentsMapper;
import com.ruoyi.admin.mapper.LlmFileMapper;
import com.ruoyi.admin.service.document.ILlmDocumentSegmentsService;
import com.ruoyi.admin.service.document.ILlmDocumentsService;
import com.ruoyi.admin.service.document.ProcessorQueueService;
import com.ruoyi.common.constant.MilvusConstants;
import com.ruoyi.common.constant.SliceMethod;
import com.ruoyi.common.core.domain.entity.LlmFile;
import com.ruoyi.common.exception.ValidException;
import com.ruoyi.common.utils.DateUtils;
import com.ruoyi.common.utils.MilvusUtils;
import com.ruoyi.common.utils.StpSystemUtil;
import com.ruoyi.common.utils.StringUtils;
import com.ruoyi.common.utils.file.ChunkUtils;
import com.ruoyi.common.utils.file.FileParseUtils;
import com.ruoyi.common.utils.html.HTMLParse;
import com.ruoyi.common.utils.uuid.IdUtils;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;

/**
 * 知识服务类
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class LlmDocumentsServiceImpl implements ILlmDocumentsService {

    /** Mapper for uploaded file records. */
    private final LlmFileMapper llmFileMapper;

    /** Mapper for knowledge documents. */
    private final LlmDocumentsMapper llmDocumentsMapper;

    /** Mapper for document segments (chunks). */
    private final LlmDocumentsSegmentsMapper llmDocumentsSegmentsMapper;

    /** Mapper for knowledge datasets (the dataset tree nodes). */
    private final LlmDatasetsMapper llmDatasetsMapper;

    /** Service that persists a single segment (used by every chunking path below). */
    private final ILlmDocumentSegmentsService llmDocumentSegmentsService;

    /**
     * Parses the file behind {@code fileId} into plain text, saves the text back
     * onto the file record and returns it together with the file name.
     *
     * @param fileId id of the uploaded file record
     * @param parse  unused in this method — kept for interface compatibility
     * @return map with keys "content", "fileId" and "knowledgeName"
     * @throws Exception when the remote parse call fails
     */
    @Override
    public Map<String, Object> parseText(String fileId, String parse) throws Exception {

        Map<String, Object> result = new HashMap<>();
        // Load the file record
        LlmFile llmFileById = llmFileMapper.getLlmFileById(fileId);
        String fileUrl = llmFileById.getFileUrl();
        // Ask the parsing service for the content; the response is a JSON array string
        String jsonArray = FileParseUtils.parseFileToContent(Collections.singletonList(fileUrl));
        List<JSONObject> parsedItems = JSONArray.parseArray(jsonArray, JSONObject.class);
        // Only one url is passed in, so only one content entry is expected back
        for (JSONObject item : parsedItems) {
            String content = item.getString("content");
            llmFileById.setContent(content);
            result.put("content", content);
            result.put("fileId", fileId);
            result.put("knowledgeName", llmFileById.getFileName());
        }
        // Persist the parsed content on the file record
        llmFileMapper.updateById(llmFileById);
        return result;

    }

    /**
     * Creates a knowledge document from an already-parsed upload and hands it to
     * the asynchronous processing queue.
     *
     * @param textId             id of the uploaded file record (its content must already be parsed)
     * @param businessDataString business payload stored verbatim on the document
     * @param treeId             id of the dataset tree node the document belongs to
     * @param summer             document summary (parameter name kept for interface compatibility)
     * @param tag                document tag(s)
     * @param knowledgeName      display name of the knowledge document
     * @param datasetCode        code of the target dataset
     * @param titleRow           unused in this method — TODO confirm whether it can be removed
     * @param catalogue          catalog the document is filed under
     * @return true when the record was inserted and queued successfully
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public boolean processText(String textId,
                               String businessDataString,
                               String treeId,
                               String summer,
                               String tag,
                               String knowledgeName,
                               String datasetCode,
                               int titleRow,
                               String catalogue) {
        // Load the parsed text and url by the uploaded file id
        LlmFile llmFile = llmFileMapper.selectById(textId);
        if (StringUtils.isBlank(llmFile.getContent())) {
            throw new ValidException("上传文件内容为空");
        }

        // Platform uploads auto-generate a knowledgeId
        String knowledgeId = IdUtils.fastUUID();
        // Check whether a document with the same knowledge id already exists
        // NOTE(review): knowledgeId was freshly generated on the line above, so this
        // lookup can never match an existing row — looks like a leftover from a flow
        // where knowledgeId was supplied externally. Confirm before removing.
        QueryWrapper<LlmDocuments> llmDocumentsQueryWrapper = new QueryWrapper<>();
        llmDocumentsQueryWrapper.eq("knowledge_id", knowledgeId)
                .eq("if_del", 0)
                .last("limit 1");
        LlmDocuments llmDocuments = llmDocumentsMapper.selectOne(llmDocumentsQueryWrapper, false);
        if (llmDocuments != null) {
            deleteDocumentsReal(MilvusConstants.VECTOR_DOCUMENT_SEGMENT, datasetCode, llmDocuments.getId());
        }

        // Build the new document record
        LlmDocuments documents = new LlmDocuments();
        documents.setDatasetCode(datasetCode);
        // NOTE(review): this id is overwritten again inside insertDocument (UUID.randomUUID)
        documents.setId(IdUtils.simpleUUID());
        documents.setKnowledgeId(knowledgeId);
        documents.setKnowledgeName(knowledgeName);
        documents.setBusinessData(businessDataString);
        documents.setTreeId(treeId);
        documents.setSummary(summer);
        documents.setTag(tag);
        documents.setCatalog(catalogue);
        documents.setUserId(StpSystemUtil.getUserInfo().getAdminUserDO().getId());
        documents.setFileSize(llmFile.getFileSize());
        boolean b = insertDocument(llmFile, documents);
        if (!b) {
            return false;
        }
        // Hand the document over to the processing queue
        return ProcessorQueueService.addLlmDocumentQueue(documents);
    }

    /**
     * Processes one queued document: marks it as processing, updates the dataset
     * counters, chunks the content into the vector store and writes back the
     * final status.
     *
     * @throws InterruptedException if taking a task from the queue is interrupted
     */
    public void startProcessorQueue() throws InterruptedException {

        LlmDocuments llmDocumentQueue = ProcessorQueueService.getLlmDocumentQueue();

        // Mark the document as "processing" (status 2).
        // BUGFIX: the original code built an UpdateWrapper but then called
        // updateById(entity), so the wrapper condition/sets were never applied and
        // the status change was silently lost.
        updateDocumentStatus(llmDocumentQueue.getId(), 2, null);

        // Dataset tree node the document belongs to
        LlmDatasets byId = llmDatasetsMapper.selectById(llmDocumentQueue.getTreeId());
        try {
            // Update document count and word count on the dataset root node
            LlmDatasets datasetsOld = llmDatasetsMapper.selectOne(new QueryWrapper<LlmDatasets>().eq("dataset_code", llmDocumentQueue.getDatasetCode()).eq("parent_id", "0"));
            UpdateWrapper<LlmDatasets> updateWrapper = new UpdateWrapper<>();
            updateWrapper.eq("id", datasetsOld.getId());
            updateWrapper.set("document_count", datasetsOld.getDocumentCount() + 1);
            // BUGFIX: word_count was computed from getDocumentCount(); it must be the
            // previous word count plus the length of the new content
            updateWrapper.set("word_count", datasetsOld.getWordCount() + llmDocumentQueue.getContent().length());
            llmDatasetsMapper.update(null, updateWrapper);

        } catch (RuntimeException e) {
            // Mark as failed (status 4) and record the error message
            updateDocumentStatus(llmDocumentQueue.getId(), 4, e.getMessage());
            log.error("【更新知识库数据失败】原因：{}", e.getMessage(), e);
            throw new RuntimeException("【更新知识库数据失败】原因：" + e.getMessage(), e);
        }

        // Chunk the content and store the chunks in Milvus
        int i;
        try {
            llmDocumentQueue.setTokenLength(0);
            i = textChunkSave(llmDocumentQueue.getDatasetCode(), byId, llmDocumentQueue);
            // Back-fill the segment count
            // NOTE(review): the "- 1" disagrees with several return paths of
            // textChunkSave (which return the exact stored count) — confirm.
            llmDocumentQueue.setSegmentsCount(i - 1);
            llmDocumentQueue.setStatus(3);
        } catch (Exception e) {
            updateDocumentStatus(llmDocumentQueue.getId(), 4, e.getMessage());
            log.error("【向指定知识库中存入数据失败】原因：分块并保存Milvus失败-{}", e.getMessage(), e);
            throw new RuntimeException("【向指定知识库中存入数据失败】原因：分块并保存Milvus失败-" + e.getMessage(), e);
        }
        // Persist the finished entity (status 3 + segment count), matching the
        // original final updateById
        llmDocumentsMapper.updateById(llmDocumentQueue);
    }

    /**
     * Applies a status (and optional error message) change to a document row.
     *
     * @param documentId   document id
     * @param status       target status: 2=processing, 3=done, 4=failed (values used in this class)
     * @param errorMessage error message for failures, or null to leave the column untouched
     */
    private void updateDocumentStatus(String documentId, int status, String errorMessage) {
        UpdateWrapper<LlmDocuments> wrapper = new UpdateWrapper<>();
        wrapper.eq("id", documentId);
        wrapper.set("status", status);
        if (errorMessage != null) {
            wrapper.set("error_message", errorMessage);
        }
        llmDocumentsMapper.update(null, wrapper);
    }

    /**
     * Chunks the document content according to the dataset's configured slice
     * method and stores every resulting segment.
     *
     * @param datasetCode dataset code (currently unused inside this method)
     * @param datasets    dataset supplying slice type and max length, may be null
     * @param documents   document whose content is chunked
     * @return number of stored segments (1 for the semantic default path)
     */
    public int textChunkSave(String datasetCode,
                             LlmDatasets datasets,
                             LlmDocuments documents) {
        // Resolve slice method and max slice length (defaults: "standard" / 800)
        int sliceTypeMaxLength = 800;
        String sliceMethod = "standard";
        if (datasets != null) {
            sliceMethod = datasets.getSliceType();
            sliceTypeMaxLength = datasets.getSliceTypeMaxLength();
        }
        if (StringUtils.isBlank(sliceMethod)) {
            sliceMethod = "default";
        }

        try {
            String content = documents.getContent();
            // Excel-style exports carry the special "^^^^^^^^" marker
            boolean hasSpecialMarker = content.contains("^^^^^^^^");

            if (SliceMethod.DEFAULT.getSliceMethod().equals(sliceMethod)) {
                if (hasSpecialMarker) {
                    // Special marker-based splitting for excel-like content
                    return processTemplateChunkBySpecial(documents.getContent(), documents);
                }
                // Default semantic splitting (count not propagated by this path)
                processTemplateChunkByDefault(documents.getContent(), documents);
                return 1;
            }
            if (SliceMethod.STANDARD.getSliceMethod().equals(sliceMethod)) {
                // Fixed-length splitting
                documents.setTokenLength(sliceTypeMaxLength);
                List<String> pieces = ChunkUtils.fixedSegment(documents.getContent(), sliceTypeMaxLength);
                for (String piece : pieces) {
                    llmDocumentSegmentsService.insert(new HashMap<>(), piece, documents);
                }
                return pieces.size();
            }
            if (SliceMethod.CHAPTER.getSliceMethod().equals(sliceMethod)) {
                // todo chapter mode — currently falls back to regex splitting
                documents.setTokenLength(0);
                return regexSegment(documents);
            }
            if (SliceMethod.CUSTOM.getSliceMethod().equals(sliceMethod)) {
                // Regex-based splitting
                documents.setTokenLength(0);
                return regexSegment(documents);
            }
            if (SliceMethod.NONE.getSliceMethod().equals(sliceMethod)) {
                // No splitting: the whole content becomes one segment
                documents.setTokenLength(documents.getContent().length());
                return noneSegment(documents);
            }
            // Unknown slice method: nothing stored, count stays at 1 (legacy behavior)
            return 1;
        } catch (Exception e) {
            log.error("插入失败: ", e);
            throw new RuntimeException("插入失败: ", e);
        }
    }

    /**
     * Stores the whole document content as a single, unsplit segment.
     *
     * @param documents document whose content is stored
     * @return number of stored segments — always 1
     */
    public Integer noneSegment(LlmDocuments documents) {
        // The entire content is persisted as the one and only segment
        llmDocumentSegmentsService.insert(new HashMap<>(), documents.getContent(), documents);
        return 1;
    }

    /**
     * Splits the document content with the document's configured regular
     * expression and stores every resulting segment.
     *
     * @param documents document supplying the content and the slice regex
     * @return number of stored segments
     */
    public Integer regexSegment(LlmDocuments documents) {
        // Regex configured on the document / dataset
        String sliceRegex = documents.getSliceRegex();
        try {
            // Chunk by regex, then persist each segment
            List<String> segments = ChunkUtils.regexChunking(documents.getContent(), sliceRegex);
            for (String segment : segments) {
                llmDocumentSegmentsService.insert(new HashMap<>(), segment, documents);
            }
            return segments.size();
        } catch (Exception e) {
            log.error("正则表达式分段失败", e);
            // BUGFIX: the original rethrow discarded the cause; keep it attached
            throw new RuntimeException("正则表达式分段失败", e);
        }
    }


    /**
     * Splits the document content into chapter-like paragraphs via the workflow
     * service and stores every paragraph as a segment.
     *
     * @param documents document whose content is split
     * @return number of stored segments
     */
    public Integer chapterSegment(LlmDocuments documents) {
        List<String> segments = new ArrayList<>();
        // Start with an empty conversation and recursively collect all paragraphs
        getContentListByDocument(documents.getContent(), "", segments);
        // Persist each paragraph as its own segment
        segments.forEach(segment -> llmDocumentSegmentsService.insert(new HashMap<>(), segment, documents));
        return segments.size();
    }


    /**
     * Recursively fetches the paragraphs for a piece of content from the workflow
     * and appends them to {@code contents}.
     *
     * @param content        text to process ("继续" is sent on continuation calls)
     * @param conversationId workflow conversation id, empty for a fresh conversation
     * @param contents       in/out list collecting the returned paragraphs
     */
    public void getContentListByDocument(String content, String conversationId, List<String> contents) {
        try {
            // Ask the workflow for the next batch of paragraphs
            Map<String, Object> productByContent = getContentByDocument(content, conversationId);
            conversationId = productByContent.get("conversationId").toString();
            String resultContent = productByContent.get("answer").toString();
            if (StringUtils.isNotBlank(resultContent)) {
                // The answer itself is a JSON object: { "over": bool, "texts": [...] }
                JSONObject jsonObject = JSONObject.parseObject(resultContent);
                // BUGFIX: getBoolean returns null when the field is absent and the
                // original direct unboxing threw a bare NPE; fail with a readable message
                Boolean isOver = jsonObject.getBoolean("over");
                if (isOver == null) {
                    throw new IllegalStateException("工作流返回结果缺少 over 字段：" + resultContent);
                }
                // Collect the returned text fragments
                JSONArray jsonArray = jsonObject.getJSONArray("texts");
                jsonArray.forEach(object -> contents.add(object.toString()));
                // over=false means more content follows; continue within the same conversation
                if (!isOver) {
                    getContentListByDocument("继续", conversationId, contents);
                }
            }
        } catch (Exception e) {
            log.error("【工作流请求失败】原因：", e);
            throw new RuntimeException("【工作流请求失败】原因：", e);
        }
    }

    /**
     * Calls the workflow chat endpoint and returns the conversation id plus the
     * cleaned answer text.
     *
     * NOTE(review): endpoint URL and bearer token are empty placeholders
     * ("" + "/chat-messages", "Bearer " + "") — the dify configuration is not
     * wired up yet (see the todo). Also, the @Transactional annotation has no
     * visible effect here: the method performs no DB work and is only invoked via
     * this-calls, which bypass the Spring proxy — confirm before relying on it.
     *
     * @param content        query text sent to the workflow
     * @param conversationId previous conversation id; null/empty starts a new conversation
     * @return map with keys "conversationId" and "answer"
     */
    @Transactional(rollbackFor = Exception.class)
    public Map<String, Object> getContentByDocument(String content, String conversationId) {
        // BUGFIX: the HttpClient and the response were never closed (resource leak);
        // both are now managed by try-with-resources
        try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
            // Build the HTTP POST request
            //todo dify configuration is not used yet
            HttpPost httpPost = new HttpPost("" + "/chat-messages");
            httpPost.setHeader("Content-Type", "application/json; utf-8");
            httpPost.setHeader("Authorization", "Bearer " + "");

            JSONObject inputs = new JSONObject();

            // Request payload
            JSONObject json = new JSONObject();
            json.put("inputs", inputs);
            json.put("query", content);
            json.put("conversation_id", conversationId == null ? "" : conversationId);
            json.put("response_mode", "blocking");
            json.put("user", "abc-123");
            httpPost.setEntity(new StringEntity(json.toString(), StandardCharsets.UTF_8));

            // Execute the request and parse the response body
            JSONObject jsonObject;
            try (CloseableHttpResponse response = httpClient.execute(httpPost)) {
                String resultString = EntityUtils.toString(response.getEntity(), "utf-8");
                jsonObject = JSONObject.parseObject(resultString);
            }

            try {
                Map<String, Object> result = new HashMap<>();
                result.put("conversationId", jsonObject.getString("conversation_id"));
                // Strip zero-width characters, whitespace and markdown code fences
                result.put("answer", StringUtils.trim(jsonObject.getString("answer")
                        .replaceAll("\\u200B|\\uFEFF| |\\s+", "")
                        .replaceAll("```json", "")
                        .replaceAll("```", "")));
                return result;
            } catch (Exception e) {
                log.error("【工作流返回结果解析失败】原因：{}", jsonObject, e);
                throw new RuntimeException("【工作流返回结果解析失败】原因：", e);
            }

        } catch (Exception e) {
            log.error("【工作流请求失败】原因：", e);
            throw new RuntimeException("【工作流请求失败】原因：", e);
        }
    }

    /**
     * Semantic slicing: stores every chunk whose restored content ("content2") is
     * blank, and returns the restored contents (tables) for the caller to store.
     *
     * @param documents  document being processed
     * @param chunksJson chunk JSON objects from the splitter (fields used here:
     *                   "content", "content2", "meta" with a nested "id")
     * @param tables     extracted HTML tables, substituted back into the chunks by
     *                   HTMLParse.restoreChunkContent
     * @param map        out-parameter: the metadata map of each stored chunk is appended here
     * @return restored chunk contents — per the caller, these are the table chunks
     * @throws IOException          declared for the restore/splitter pipeline
     * @throws InterruptedException declared for the restore/splitter pipeline
     */
    private List<String> semanticAware(LlmDocuments documents, List<JSONObject> chunksJson, Map<String, List<String>> tables, List<Map<String, String>> map) throws IOException, InterruptedException {
        // Substitute the placeholders in the chunks back with the real table content
        List<String> list = HTMLParse.restoreChunkContent(chunksJson, tables);
        // Iterate over all chunks
        for (JSONObject chunk : chunksJson) {
            String string = chunk.getString("content2");
            // Skip chunks whose restored content is non-blank; those come back
            // through the returned list and are handled by the caller
            if (StringUtils.isNotBlank(string)) {
                continue;
            }
            String content = chunk.getString("content");
            String meta = chunk.getString("meta");
            JSONObject jsonObject1 = JSONObject.parseObject(meta);
            String id = jsonObject1.getString("id");
            HashMap<String, String> objectObjectHashMap = new HashMap<>();
            // Per the original note: with a tagging pipeline url configured, 18 key
            // points would be extracted here; without one only basic metadata is set
            //certificationManagement
            objectObjectHashMap.put("type", "chunk");
            objectObjectHashMap.put("chunkId", id);
            try {
                llmDocumentSegmentsService.insert(objectObjectHashMap, content, documents);
            } catch (Exception e) {
                // Best-effort: a failed chunk is logged and skipped, not fatal
                log.error("存入数据失败", e);
            }
            // Collect the chunk metadata for the caller
            map.add(objectObjectHashMap);
        }
        return list;
    }


    /**
     * Cleans the text content and slices it according to the document's own slice
     * method, storing every resulting segment.
     *
     * NOTE(review): unlike textChunkSave (which dispatches on the dataset's slice
     * type), this method dispatches on documents.getSliceMethod() — confirm the
     * two settings are intended to coexist.
     *
     * @param template  raw text/HTML to clean and slice
     * @param documents document the segments belong to
     */
    public void processTemplateChunkByDefault(String template, LlmDocuments documents) {
        try {


            // Extract all HTML tables and strip inline styles; extracted tables are
            // collected into `tables`
            Map<String, List<String>> tables = new HashMap<>();
            template = HTMLParse.removeInlineStylesAndGetTables(template, tables);

            // NOTE(review): CopyOnWriteArrayList appears unnecessary — the list is
            // only used on this thread; a plain ArrayList would do. Kept as-is.
            List<String> list = new CopyOnWriteArrayList<>();
            List<Map<String, String>> map = new ArrayList<>();
            // Slice method configured on the document itself
            String sliceMethod = documents.getSliceMethod();


            // Semantic-aware slicing
            if (sliceMethod.equals(SliceMethod.DEFAULT.getSliceMethod())) {
                // Chunks returned by the remote splitter
                List<JSONObject> chunksJson = ChunkUtils.getDocumentSplitByHttpRequest(template);
                list = semanticAware(documents, chunksJson, tables, map);
            }
            // Fixed-length slicing
            else if (sliceMethod.equals(SliceMethod.STANDARD.getSliceMethod())) {
                list = ChunkUtils.fixedSegment(documents.getContent(), 2400);
            }
            // Regex slicing
            else if (sliceMethod.equals(SliceMethod.CUSTOM.getSliceMethod())) {
                list = ChunkUtils.regexChunking(template, documents.getSliceRegex());
            }
            // No slicing: the whole template becomes a single chunk
            else if (sliceMethod.equals(SliceMethod.NONE.getSliceMethod())) {
                HashMap<String, String> objectObjectHashMap = new HashMap<>();
                objectObjectHashMap.put("type", "chunk");
                objectObjectHashMap.put("chunkId", "0");
                llmDocumentSegmentsService.insert(objectObjectHashMap, template, documents);
            }
            // Fallback: same behavior as semantic-aware slicing
            else {
                // Chunks returned by the remote splitter
                List<JSONObject> chunksJson = ChunkUtils.getDocumentSplitByHttpRequest(template);
                list = semanticAware(documents, chunksJson, tables, map);
            }

            // Store every entry of `list` (the extracted table contents for the
            // semantic path) as a "table" typed segment
            for (String s : list) {
                Map<String, String> objectObjectHashMap = new HashMap<>();
                objectObjectHashMap.put("type", "table");
                objectObjectHashMap.put("chunkId", "");
                // Original note: if a product is present it would be imported into a
                // staging table and tagged here
                try {
                    // Persist the segment
                    llmDocumentSegmentsService.insert(objectObjectHashMap, s, documents);
                } catch (Exception e) {
                    // Best-effort: log and continue with the remaining entries
                    log.error("存入数据失败", e);
                }
                map.add(objectObjectHashMap);
            }


        } catch (Exception e) {
            log.error("【请求失败】原因：{}", e.getMessage(), e);
            throw new RuntimeException(e);
        }
    }


    /**
     * Splits manually edited content and stores each piece as a segment.
     *
     * NOTE(review): split("") splits the content into SINGLE CHARACTERS, while the
     * original comment says "split by the manually inserted delimiter". The real
     * delimiter seems to be missing here — confirm and fill it in; behavior is
     * intentionally left unchanged in this revision.
     *
     * @param content   manually edited source text
     * @param documents document the segments belong to
     * @return number of produced pieces
     */
    public int artificialChunk(String content, LlmDocuments documents) {

        // Split by the manually inserted delimiter (see review note above)
        String[] split = content.split("");
        for (String segmentContent : split) {
            HashMap<String, String> segmentMeta = new HashMap<>();

            try {
                //todo tagging is not handled here yet
                segmentMeta.put("type", "chunk");
            } catch (Exception e) {
                log.error("处理异常", e);
            }
            try {
                llmDocumentSegmentsService.insert(segmentMeta, segmentContent, documents);
            } catch (Exception e) {
                // BUGFIX: the original log dropped the exception — keep the stack trace
                log.error("存入数据失败", e);
            }
        }
        return split.length;
    }

    /**
     * Splits content on the special "^^^^^^^^" marker (excel-style exports) and
     * stores every non-blank piece as a segment.
     *
     * @param content   source text containing the marker
     * @param documents document the segments belong to
     * @return number of segments actually stored (blank pieces are skipped)
     * @throws IOException declared for interface compatibility
     */
    public int processTemplateChunkBySpecial(String content, LlmDocuments documents) throws IOException {

        // Split on the "^^^^^^^^" marker
        String[] split = content.split("\\^\\^\\^\\^\\^\\^\\^\\^");
        // BUGFIX: the original returned split.length, which counted the blank
        // pieces that are skipped below; count only the pieces that are processed
        int processed = 0;
        for (String segmentContent : split) {
            if (StringUtils.isBlank(segmentContent)) {
                continue;
            }
            try {
                HashMap<String, String> segmentMeta = new HashMap<>();

                Map<String, String> certificationManagementNew = new HashMap<>();
                try {
                    //todo tagging is not handled here yet
                    //certificationManagementNew = getCertificationManagementNew(segmentContent, llmPipelineByCategoryId, "", "");
                    segmentMeta.put("type", "chunk");
                    segmentMeta.putAll(certificationManagementNew);

                } catch (Exception e) {
                    log.error("处理异常", e);
                }
                try {
                    llmDocumentSegmentsService.insert(segmentMeta, segmentContent, documents);
                } catch (Exception e) {
                    // BUGFIX: keep the stack trace instead of a bare message
                    log.error("存入数据失败", e);
                }
                processed++;

            } catch (Exception e) {
                // BUGFIX: the original logged an empty message
                log.error("处理切块失败", e);
            }

        }
        return processed;
    }


    /**
     * Fills in the base fields of a new document record and inserts it.
     *
     * NOTE(review): the id assigned by the caller (processText sets simpleUUID) is
     * overwritten here with a fresh random UUID — confirm this is intended.
     *
     * @param llmFile   uploaded file record supplying content, type and url
     * @param documents document entity to complete and insert
     * @return true when the row was inserted
     */
    public boolean insertDocument(LlmFile llmFile, LlmDocuments documents) {
        UUID uuid = UUID.randomUUID();
        // Primary key of the document (overwrites any id the caller assigned)
        documents.setId(uuid.toString());
        documents.setEnabled(1);
        // Location of the source file
        documents.setFileUrl(llmFile.getFileUrl());
        //todo the fields below may be unused — revisit and remove later
        documents.setPower("1");
        documents.setStartTime(DateUtils.getTime());
        // Segment count starts at zero and is back-filled after chunking
        documents.setSegmentsCount(0);
        // Creator information
        documents.setCreateBy(StpSystemUtil.getSysUser().getUsername());
        documents.setCreateTime(DateUtils.dateTimeNow("yyyy-MM-dd HH:mm:ss"));
        // Copy content and derived stats from the uploaded file record
        documents.setContent(llmFile.getContent());
        documents.setWordCount(llmFile.getContent().length());
        documents.setFileType(llmFile.getFileType());
        documents.setUploadType(llmFile.getUploadType());
        return llmDocumentsMapper.insert(documents) > 0;
    }


    /**
     * Physically deletes a document: updates the dataset counters, removes the
     * document row and its segments, then removes the vectors from Milvus.
     * PG is modified first so a Milvus failure rolls everything back.
     *
     * @param collectionName Milvus collection holding the segment vectors
     * @param partitionName  dataset code of the document's dataset
     * @param documentId     id of the document to delete
     * @return true on success; failures are rethrown as RuntimeException
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public boolean deleteDocumentsReal(String collectionName, String partitionName, String documentId) {
        try {

            // Look up the document to get its content length for the word-count decrement
            int documentCount = 0;
            LlmDocuments documents = llmDocumentsMapper.selectOne(new QueryWrapper<LlmDocuments>().eq("id", documentId));
            if (documents != null) {
                try {
                    documentCount = documents.getContent().length();
                } catch (Exception e) {
                    log.error("文本为空：{}", e.getMessage(), e);
                }

            }
            // Look up the dataset root node by its code
            LlmDatasets datasetsOld = llmDatasetsMapper.selectOne(new QueryWrapper<LlmDatasets>().eq("dataset_code", partitionName).eq("parent_id", "0"));
            // BUGFIX: a missing dataset row previously surfaced as a bare NPE
            if (datasetsOld == null) {
                throw new IllegalStateException("知识库不存在，dataset_code=" + partitionName);
            }
            // Decrement the dataset's document count and total word count
            UpdateWrapper<LlmDatasets> llmDatasetsUpdateWrapper = new UpdateWrapper<>();
            llmDatasetsUpdateWrapper.eq("id", datasetsOld.getId());
            llmDatasetsUpdateWrapper.set("word_count", datasetsOld.getWordCount() - documentCount);
            llmDatasetsUpdateWrapper.set("document_count", datasetsOld.getDocumentCount() - 1);
            llmDatasetsMapper.update(llmDatasetsUpdateWrapper);
            // Delete the document row
            llmDocumentsMapper.deleteById(documentId);
            // PG is touched before Milvus so a Milvus exception rolls PG back
            if (MilvusUtils.hasCollection(collectionName)) {
                // Collect the Milvus element ids of all segments of this document
                List<Object> elementIds = new ArrayList<>();
                List<LlmDocumentSegments> segmentsList = llmDocumentsSegmentsMapper.selectList(
                        new QueryWrapper<LlmDocumentSegments>().eq("document_id", documentId));
                segmentsList.forEach(segments -> elementIds.add(segments.getElementId()));
                // Only call deleteData when there is something to delete
                if (!elementIds.isEmpty()) {
                    // Remove the segment rows, then the vectors
                    llmDocumentsSegmentsMapper.delete(new QueryWrapper<LlmDocumentSegments>().eq("document_id", documentId));
                    MilvusUtils.deleteData(collectionName, elementIds);
                }
            }

            return true;
        } catch (Exception e) {
            log.error("【知识库文档删除失败】原因：{}", e.getMessage(), e);
            // BUGFIX: the original threw new RuntimeException() with no message or cause
            throw new RuntimeException("【知识库文档删除失败】原因：" + e.getMessage(), e);
        }
    }

    /**
     * Deletes a batch of documents by delegating to the single-document delete.
     * Any failure propagates and rolls the whole batch back.
     *
     * @param collectionName Milvus collection holding the segment vectors
     * @param partitionName  dataset code of the documents' dataset
     * @param documentIds    ids of the documents to delete
     * @return true when every delete succeeded
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public boolean batchDeleteDocumentsReal(String collectionName, String partitionName, List<String> documentIds) {
        documentIds.forEach(documentId -> deleteDocumentsReal(collectionName, partitionName, documentId));
        return true;
    }

    /**
     * Lists the documents under a folder, optionally filtered by a content keyword.
     *
     * @param folderId id of the tree node; blank returns an empty list
     * @param keyword  optional content filter (LIKE match when non-blank)
     * @return matching documents, never null
     */
    @Override
    public List<LlmDocuments> getList(String folderId, String keyword) {
        if (StringUtils.isBlank(folderId)) {
            return new ArrayList<>();
        }
        QueryWrapper<LlmDocuments> wrapper = new QueryWrapper<LlmDocuments>()
                .eq("tree_id", folderId)
                .like(StringUtils.isNotBlank(keyword), "content", keyword);
        return llmDocumentsMapper.selectList(wrapper);
    }

    /**
     * Returns the processing status for each of the given document ids.
     *
     * @param ids document ids; null/empty returns an empty list
     * @return list of maps containing "id" and "status"
     */
    @Override
    public List<Map<String, Object>> getDocumentStatus(List<String> ids) {
        // BUGFIX: an empty id list made in("id", ids) emit illegal SQL ("IN ()");
        // short-circuit instead of failing
        if (ids == null || ids.isEmpty()) {
            return new ArrayList<>();
        }
        QueryWrapper<LlmDocuments> llmDocumentsQueryWrapper = new QueryWrapper<>();
        llmDocumentsQueryWrapper.in("id", ids);
        llmDocumentsQueryWrapper.select("id", "status");
        return llmDocumentsMapper.selectMaps(llmDocumentsQueryWrapper);
    }

    /**
     * Loads a single document by id.
     *
     * @param documentId document id; blank returns an empty entity
     * @return the document, or an empty LlmDocuments when the id is blank
     */
    @Override
    public LlmDocuments getDocumentById(String documentId) {
        if (StringUtils.isBlank(documentId)) {
            return new LlmDocuments();
        }
        // selectOne with throwEx=false: multiple matches do not raise an exception
        return llmDocumentsMapper.selectOne(new QueryWrapper<LlmDocuments>().eq("id", documentId), false);
    }

    /**
     * Toggles the enabled flag of a document.
     *
     * @param documentId document id; blank is a no-op returning false
     * @param enable     new value for the enabled column
     * @return true when a row was updated
     */
    @Override
    public boolean editEnable(String documentId, String enable) {
        if (StringUtils.isBlank(documentId)) {
            return false;
        }
        UpdateWrapper<LlmDocuments> wrapper = new UpdateWrapper<LlmDocuments>()
                .eq("id", documentId)
                .set("enabled", enable);
        return llmDocumentsMapper.update(wrapper) > 0;
    }

    /**
     * Updates the editable metadata of a document: tag, summary, catalog,
     * business data and knowledge name.
     *
     * @param documents entity carrying the id and the new field values
     * @return true when a row was updated
     */
    @Override
    public boolean editInfo(LlmDocuments documents) {
        UpdateWrapper<LlmDocuments> wrapper = new UpdateWrapper<LlmDocuments>()
                .eq("id", documents.getId())
                .set("tag", documents.getTag())
                .set("summary", documents.getSummary())
                .set("catalog", documents.getCatalog())
                .set("business_data", documents.getBusinessData())
                .set("knowledge_name", documents.getKnowledgeName());
        return llmDocumentsMapper.update(wrapper) > 0;
    }

    /**
     * Rebuilds a document's segments after manual editing: removes the old
     * segments and vectors, re-chunks the edited content, then back-fills the
     * segment count, word count and update time.
     *
     * @param documents entity carrying the id and the manually edited content
     * @return always true; failures propagate as exceptions
     */
    @Override
    // BUGFIX: this method performs multiple writes (segment delete, Milvus delete,
    // re-chunk inserts, document update) but had no transaction, unlike the
    // sibling deleteDocumentsReal
    @Transactional(rollbackFor = Exception.class)
    public boolean editDocument(LlmDocuments documents) {
        // Collect the Milvus element ids of the document's existing segments
        List<Object> elementIds = new ArrayList<>();
        QueryWrapper<LlmDocumentSegments> segmentQuery = new QueryWrapper<LlmDocumentSegments>()
                .eq("document_id", documents.getId());
        List<LlmDocumentSegments> segmentsList = llmDocumentsSegmentsMapper.selectList(segmentQuery);
        segmentsList.forEach(segments -> elementIds.add(segments.getElementId()));
        // Only delete when there is something to delete
        if (!elementIds.isEmpty()) {
            // Remove the old segment rows, then their vectors
            llmDocumentsSegmentsMapper.delete(new QueryWrapper<LlmDocumentSegments>().eq("document_id", documents.getId()));
            MilvusUtils.deleteData(MilvusConstants.VECTOR_DOCUMENT_SEGMENT, elementIds);
        }

        // Re-chunk the manually edited content into the vector store and PG
        int segmentCount = artificialChunk(documents.getContent(), documents);
        documents.setSegmentsCount(segmentCount);
        documents.setWordCount(documents.getContent().length());
        documents.setUpdateTime(DateUtils.getTime());
        // Only segment count, word count, content, metadata and update time need updating
        llmDocumentsMapper.updateById(documents);
        return true;
    }


}
