package info.wangyuan.agent.service.rag;

import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.parser.apache.poi.ApachePoiDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentBySentenceSplitter;
import dev.langchain4j.data.segment.TextSegment;
import info.wangyuan.agent.common.Constant.EmbeddingConstant;
import info.wangyuan.agent.entity.dto.RagDocSyncChunkMsgDTO;
import info.wangyuan.agent.entity.po.FileInfo;
import info.wangyuan.agent.mapper.DocumentUnitMapper;
import info.wangyuan.agent.mapper.FileInfoMapper;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Word document chunking strategy: parses a Word file with Apache POI, splits the
 * extracted text into sentence-based segments, then persists the segment count and
 * the individual segments for downstream embedding.
 *
 * @author Albert
 * @since 2025-08-28 01:06:08
 */
@Slf4j
@Service(value = "ragDocSyncOcr-WORD")
// NOTE(review): Spring component scanning does not instantiate abstract classes, so
// this @Service bean ("ragDocSyncOcr-WORD") is most likely never registered. If
// fileToBytes/insertData are default methods on RagDocSyncChunkStrategy, `abstract`
// should be removed; otherwise the @Service annotation belongs on the concrete
// subclass — confirm against the interface before changing the modifier.
public abstract class WORDRagDocSyncChunkStrategyImpl implements RagDocSyncChunkStrategy {

    private final FileInfoMapper fileInfoMapper;
    private final DocumentUnitMapper documentUnitMapper;

    public WORDRagDocSyncChunkStrategyImpl(FileInfoMapper fileInfoMapper, DocumentUnitMapper documentUnitMapper) {
        this.fileInfoMapper = fileInfoMapper;
        this.documentUnitMapper = documentUnitMapper;
    }

    /**
     * Loads the file identified by the message, chunks its text, updates the stored
     * page/segment count, and persists each segment. Recoverable errors (missing
     * file row, unreadable file) are logged and abort processing without throwing.
     *
     * @param ragDocSyncChunkMsgDTO message carrying the file id; its pageSize is
     *                              updated in place with the resulting segment count
     * @param strategy              strategy key (unused here; part of the interface contract)
     */
    @Override
    public void handle(RagDocSyncChunkMsgDTO ragDocSyncChunkMsgDTO, String strategy) {
        Integer fileId = ragDocSyncChunkMsgDTO.getFileId();
        FileInfo fileInfo = fileInfoMapper.selectById(fileId);
        if (fileInfo == null) {
            log.error("文件ID {} 不存在", fileId);
            return;
        }

        log.info("开始对Word文件进行切分，文件Id: {}", fileId);
        byte[] fileBytes;
        try {
            fileBytes = fileToBytes(fileInfo.getPath());
        } catch (IOException e) {
            log.error("读取文件 {} 失败", fileInfo.getPath(), e);
            return;
        }

        // Parse the Word document into indexed text segments.
        Map<Integer, String> chunkData = processFile(fileBytes);

        // Record the segment count on both the message and the file row; segments
        // act as "pages" for the vectorization step downstream.
        int totalSegments = chunkData.size();
        ragDocSyncChunkMsgDTO.setPageSize(totalSegments);

        LambdaUpdateWrapper<FileInfo> wrapper = Wrappers.lambdaUpdate(FileInfo.class)
                .eq(FileInfo::getId, fileId)
                .set(FileInfo::getPageSize, totalSegments);
        fileInfoMapper.update(wrapper);

        log.info("更新文件 {} 页数，总页数： {}", fileId, totalSegments);

        // Persist the segments.
        insertData(ragDocSyncChunkMsgDTO, chunkData, documentUnitMapper);

        log.info("Word文件切分入库完成，文件Id: {}", fileId);
    }

    /**
     * Parses the given Word file bytes with Apache POI and splits the extracted text
     * by sentence into segments bounded by
     * {@link EmbeddingConstant#WORD_MAX_SEGMENT_SIZE_IN_CHARS} characters with
     * {@link EmbeddingConstant#WORD_MAX_OVERLAP_SIZE_IN_CHARS} characters of overlap.
     *
     * @param fileBytes raw Word file content
     * @return map from 0-based segment index to segment text; empty when parsing fails
     */
    public Map<Integer, String> processFile(byte[] fileBytes) {
        log.info("当前文件类型不是 PDF，文本是直接提取的 —— 不包含页码；页码的概念只是作为向量化的索引页存在。");

        final Map<Integer, String> chunkData = new HashMap<>();
        // try-with-resources replaces the original manual close-in-finally; a
        // ByteArrayInputStream's close() never throws, so behavior is unchanged.
        try (InputStream inputStream = new ByteArrayInputStream(fileBytes)) {
            Document document = new ApachePoiDocumentParser().parse(inputStream);
            List<TextSegment> segments = new DocumentBySentenceSplitter(
                    EmbeddingConstant.WORD_MAX_SEGMENT_SIZE_IN_CHARS,
                    EmbeddingConstant.WORD_MAX_OVERLAP_SIZE_IN_CHARS).split(document);

            for (int i = 0; i < segments.size(); i++) {
                chunkData.put(i, segments.get(i).text());
            }
        } catch (Exception e) {
            // Parsing/splitting failure: log and fall through with whatever was
            // accumulated (in practice an empty map), matching the original contract
            // of never throwing from this method.
            log.error("该文件处理异常", e);
        }
        return chunkData;
    }
}
