package com.example.springai.service.impl;

import cn.hutool.core.io.FileUtil;
import cn.hutool.core.lang.UUID;
import cn.hutool.core.util.IdUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.crypto.digest.MD5;
import cn.hutool.json.JSONUtil;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.example.springai.common.R;
import com.example.springai.domain.AiDocument;
import com.example.springai.domain.AiDocumentChunk;
import com.example.springai.enums.FileTypeEnum;
import com.example.springai.mapper.AiDocumentChunkMapper;
import com.example.springai.mapper.AiDocumentMapper;
import com.example.springai.service.IAiDocumentChunkService;
import com.example.springai.service.IAiDocumentService;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.chat.model.ChatModel;
import org.springframework.ai.content.Media;
import org.springframework.ai.document.DefaultContentFormatter;
import org.springframework.ai.document.Document;
import org.springframework.ai.embedding.EmbeddingModel;
import org.springframework.ai.model.transformer.KeywordMetadataEnricher;
import org.springframework.ai.model.transformer.SummaryMetadataEnricher;
import org.springframework.ai.reader.ExtractedTextFormatter;
import org.springframework.ai.reader.JsonReader;
import org.springframework.ai.reader.TextReader;
import org.springframework.ai.reader.pdf.PagePdfDocumentReader;
import org.springframework.ai.reader.pdf.ParagraphPdfDocumentReader;
import org.springframework.ai.reader.pdf.config.PdfDocumentReaderConfig;
import org.springframework.ai.transformer.ContentFormatTransformer;
import org.springframework.ai.transformer.splitter.TokenTextSplitter;
import org.springframework.ai.vectorstore.VectorStore;
import org.springframework.core.io.DefaultResourceLoader;
import org.springframework.core.io.Resource;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;

import java.io.InputStream;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

/**
 * AI knowledge-base document service implementation.
 *
 * <p>Handles document upload with MD5-based de-duplication, persistence of the
 * file to local disk, vectorization of the content into the {@link VectorStore},
 * and lifecycle management (listing, detail lookup, deletion) of documents and
 * their chunks.</p>
 *
 * @author ZWX
 */
@Slf4j
@Service
@RequiredArgsConstructor
@Transactional(rollbackFor = Exception.class)
public class AiDocumentServiceImpl extends ServiceImpl<AiDocumentMapper, AiDocument> implements IAiDocumentService {

    /**
     * Local directory uploaded documents are written to.
     * NOTE(review): hard-coded Windows path — externalize to configuration
     * (e.g. an application.yml property) before deploying elsewhere.
     */
    private static final String UPLOAD_DIR = "D:\\Code\\java\\spring-ai\\upload\\documents\\";

    private final ChatModel chatModel;
    private final VectorStore vectorStore;
    private final IAiDocumentChunkService aiDocumentChunkService;

    /**
     * Uploads a file: de-duplicates by MD5, writes it to local disk, saves the
     * document record, then vectorizes its content.
     *
     * @param file uploaded multipart file
     * @return true on success (also true when an identical document already exists)
     */
    @Override
    public boolean uploadDocument(MultipartFile file) {
        AiDocument aiDocument = new AiDocument();
        String originalFilename = file.getOriginalFilename();

        if (StrUtil.isBlank(originalFilename)) {
            log.error("获取原文件名失败。");
            return false;
        }

        long documentId = IdUtil.getSnowflakeNextId();
        aiDocument.setId(documentId);
        String extName = FileUtil.extName(originalFilename);
        aiDocument.setCreatedTime(LocalDateTime.now());
        aiDocument.setUpdatedTime(LocalDateTime.now());
        aiDocument.setOriginalName(originalFilename);
        aiDocument.setFileType(extName);
        aiDocument.setContentType(file.getContentType());
        // Guard against filenames without an extension: lastIndexOf('.') would
        // return -1 and substring(0, -1) would throw.
        int dotIndex = originalFilename.lastIndexOf('.');
        String baseName = dotIndex >= 0 ? originalFilename.substring(0, dotIndex) : originalFilename;
        aiDocument.setName(baseName + "-" + System.currentTimeMillis());
        aiDocument.setFileSize(file.getSize());

        try (InputStream is = file.getInputStream()) {
            // Read the whole stream once so the bytes can be reused for both the
            // MD5 digest and the write to disk.
            byte[] fileBytes = is.readAllBytes();

            // De-duplicate by content hash: skip the upload if an identical document exists.
            String md5 = MD5.create().digestHex(fileBytes);
            LambdaQueryWrapper<AiDocument> queryWrapper = new LambdaQueryWrapper<>();
            queryWrapper.eq(AiDocument::getMd5, md5).last("limit 1");
            if (Objects.nonNull(getOne(queryWrapper))) {
                log.info("\"{}\"文档已存在", originalFilename);
                return true;
            }

            aiDocument.setMd5(md5);
            String filePath = UPLOAD_DIR + aiDocument.getName() + "." + extName;
            FileUtil.writeBytes(fileBytes, filePath);
            aiDocument.setFilePath(filePath);

            // Save the document row BEFORE vectorizing: vectorize() updates this
            // row's chunk count and inserts chunk rows referencing this document
            // id, which previously ran against a not-yet-persisted record and
            // silently updated zero rows.
            boolean saved = save(aiDocument);

            vectorize(aiDocument);

            return saved;
        } catch (Exception e) {
            // Best-effort cleanup of the partially written file; the surrounding
            // transaction rolls back the database rows.
            String filePath = aiDocument.getFilePath();
            if (FileUtil.exist(filePath)) {
                FileUtil.del(filePath);
            }
            log.error("上传文件失败", e);
            // Preserve the cause so logs and callers keep the original stack trace.
            throw new RuntimeException("上传文件失败", e);
        }
    }

    /**
     * Queries documents with optional fuzzy name / original-name filters and an
     * exact file-type filter; blank criteria are ignored.
     *
     * @param req filter criteria
     * @return matching documents
     */
    @Override
    public List<AiDocument> getList(AiDocument req) {
        LambdaQueryWrapper<AiDocument> queryWrapper = new LambdaQueryWrapper<>();
        queryWrapper.like(StrUtil.isNotBlank(req.getName()), AiDocument::getName, req.getName());
        queryWrapper.like(StrUtil.isNotBlank(req.getOriginalName()), AiDocument::getOriginalName, req.getOriginalName());
        queryWrapper.eq(StrUtil.isNotBlank(req.getFileType()), AiDocument::getFileType, req.getFileType());
        return list(queryWrapper);
    }

    /**
     * Deletes a document together with its vector-store entries, chunk rows and
     * the file on disk.
     *
     * @param documentId document id
     * @return true if the document existed and was removed, false otherwise
     */
    @Override
    public boolean deleteDocument(Long documentId) {
        AiDocument aiDocument = getById(documentId);
        // Previously a missing document caused an NPE at getFilePath() below.
        if (Objects.isNull(aiDocument)) {
            log.warn("文档不存在：{}", documentId);
            return false;
        }
        List<AiDocumentChunk> aiDocumentChunks = aiDocumentChunkService.getByDocumentId(documentId);
        // Remove vector entries first; skip the call entirely when there are no
        // chunks (some VectorStore implementations reject empty id lists).
        List<String> chunkIds = aiDocumentChunks.stream().map(AiDocumentChunk::getId).toList();
        if (!chunkIds.isEmpty()) {
            vectorStore.delete(chunkIds);
        }
        // Remove the chunk rows.
        LambdaQueryWrapper<AiDocumentChunk> documentChunkQueryWrapper = new LambdaQueryWrapper<>();
        documentChunkQueryWrapper.eq(AiDocumentChunk::getDocumentId, documentId);
        aiDocumentChunkService.remove(documentChunkQueryWrapper);
        // Remove the document row.
        removeById(documentId);
        // Remove the file on disk (no-op if it is already gone).
        if (FileUtil.exist(aiDocument.getFilePath())) {
            FileUtil.del(aiDocument.getFilePath());
        }
        return true;
    }

    /**
     * Loads a document by id, with its chunk list attached.
     *
     * @param documentId document id
     * @return the document with chunks populated, or null if it does not exist
     */
    @Override
    public AiDocument getDocumentDetail(Long documentId) {
        AiDocument aiDocument = getById(documentId);
        if (Objects.nonNull(aiDocument)) {
            aiDocument.setChunks(aiDocumentChunkService.getByDocumentId(documentId));
        }
        return aiDocument;
    }

    /**
     * Vectorizes a document's content: read, split into chunks, persist chunk
     * metadata and add the chunks to the vector store.
     *
     * <p>NOTE(review): {@code @Async} has no effect here — the method is private
     * and invoked through {@code this} from {@link #uploadDocument}, so the
     * Spring proxy is bypassed and it runs synchronously. To make it truly
     * asynchronous, move it to a separate bean (and make it public) so the call
     * goes through the proxy; note that would also detach it from the caller's
     * transaction.</p>
     *
     * <p>Currently only TEXT files complete the full pipeline; PDF and JSON
     * support is still experimental (content is read and logged but not yet
     * stored — TODO).</p>
     *
     * @param aiDocument document whose file will be vectorized
     */
    @Async
    private void vectorize(AiDocument aiDocument) {
        String fileType = aiDocument.getFileType();
        // "file:" prefix so the absolute path is resolved as a filesystem resource.
        Resource resource = new DefaultResourceLoader().getResource("file:" + aiDocument.getFilePath());

        FileTypeEnum fileTypeEnum = FileTypeEnum.getByType(fileType);
        if (Objects.isNull(fileTypeEnum)) {
            log.error("不支持的文件类型：{}", fileType);
            return;
        }

        switch (fileTypeEnum) {
            case TEXT:
                vectorizeText(aiDocument, resource);
                break;
            case PDF:
                // ParagraphPdfDocumentReader splits by the PDF catalog (TOC); not
                // all PDFs contain one. For catalog-less PDFs, PagePdfDocumentReader
                // (Apache PDFBox, configurable via PdfDocumentReaderConfig /
                // ExtractedTextFormatter) is the page-per-document alternative.
                // TODO: feed the result through the split/persist/store pipeline.
                ParagraphPdfDocumentReader paragraphPdfDocumentReader = new ParagraphPdfDocumentReader(resource);
                log.info("\n{}\n", paragraphPdfDocumentReader.read());
                break;
            case JSON:
                // TODO: feed the result through the split/persist/store pipeline.
                JsonReader jsonReader = new JsonReader(resource);
                log.info("\n{}\n", jsonReader.read());
                break;
            default:
                break;
        }
    }

    /**
     * Full vectorization pipeline for plain-text resources: read the content,
     * split it into token-sized chunks, record the chunk count on the document
     * row, persist the chunk rows and add the chunks to the vector store.
     *
     * @param aiDocument owning document (already persisted)
     * @param resource   filesystem resource to read
     */
    private void vectorizeText(AiDocument aiDocument, Resource resource) {
        // 1. Read the document content, tagging every Document with the original filename.
        TextReader textReader = new TextReader(resource);
        textReader.getCustomMetadata().put("fileName", aiDocument.getOriginalName());
        List<Document> documents = textReader.read();

        // 2. Split the text into token-sized chunks.
        TokenTextSplitter splitter = new TokenTextSplitter();
        List<Document> splitDocuments = splitter.apply(documents);

        // 3. Record how many chunks the document produced.
        LambdaUpdateWrapper<AiDocument> updateWrapper = new LambdaUpdateWrapper<>();
        updateWrapper.eq(AiDocument::getId, aiDocument.getId());
        updateWrapper.set(AiDocument::getChunkCount, splitDocuments.size());
        update(updateWrapper);

        // 4. Persist per-chunk rows so chunks can be listed and deleted later.
        addAiDocumentChunkData(aiDocument, splitDocuments);

        // Optional enrichment stages can be chained here before storage, e.g.
        //   KeywordMetadataEnricher.builder(chatModel).keywordCount(5).build()
        //   new SummaryMetadataEnricher(chatModel, List.of(PREVIOUS, CURRENT, NEXT))
        //   new ContentFormatTransformer(DefaultContentFormatter.defaultConfig())
        // They were previously constructed unconditionally but never applied.

        // 5. Store the chunks (and their embeddings) in the vector store.
        vectorStore.add(splitDocuments);
    }

    /**
     * Persists one database row per chunk, capturing the chunk's id, text,
     * media name (if any) and metadata as JSON.
     *
     * @param aiDocument owning document
     * @param documents  split chunks produced by the text splitter
     */
    private void addAiDocumentChunkData(AiDocument aiDocument, List<Document> documents) {
        List<AiDocumentChunk> aiDocumentChunks = new ArrayList<>(documents.size());
        for (Document document : documents) {
            AiDocumentChunk aiDocumentChunk = new AiDocumentChunk();
            aiDocumentChunk.setDocumentId(aiDocument.getId());
            aiDocumentChunk.setId(document.getId());
            aiDocumentChunk.setText(document.getText());
            Media media = document.getMedia();
            if (Objects.nonNull(media)) {
                aiDocumentChunk.setMedia(media.getName());
            }
            Map<String, Object> metadata = document.getMetadata();
            aiDocumentChunk.setMetadata(JSONUtil.toJsonStr(metadata));
            aiDocumentChunk.setCreatedTime(LocalDateTime.now());
            aiDocumentChunks.add(aiDocumentChunk);
        }
        aiDocumentChunkService.saveBatch(aiDocumentChunks);
    }
}
