package com.wfh.aiagent.rag;

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.*;

import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.document.Document;
import org.springframework.ai.reader.markdown.MarkdownDocumentReader;
import org.springframework.ai.reader.markdown.config.MarkdownDocumentReaderConfig;
import org.springframework.core.io.Resource;
import org.springframework.core.io.support.ResourcePatternResolver;
import org.springframework.stereotype.Component;

/**
 * @Author FengHuan Wang
 * @Date 2025/5/26 9:38
 * @Version 1.0
 */
@Component
@Slf4j
public class ProgramingAppDocLoader {

    /** Number of URL-safe Base64 characters kept for a short document ID (~132 bits of SHA-256). */
    private static final int STABLE_ID_LENGTH = 22;

    private final ResourcePatternResolver resourcePatternResolver;

    // Field injection is deliberately kept here so the public constructor
    // signature stays unchanged for existing callers / test code.
    @jakarta.annotation.Resource
    private SemanticChunker semanticChunker;

    public ProgramingAppDocLoader(ResourcePatternResolver resourcePatternResolver) {
        this.resourcePatternResolver = resourcePatternResolver;
    }

    /**
     * Derives a short, stable identifier from a resource path and a content string.
     * The same (path, content) pair always yields the same ID, so re-ingesting an
     * unchanged document reproduces identical chunk IDs (important for upserts).
     *
     * @param path    resource path/filename used to namespace the ID
     * @param content content string (here: the document's content hash) mixed into the ID
     * @return the first {@value #STABLE_ID_LENGTH} characters of the URL-safe,
     *         unpadded Base64 encoding of the SHA-256 digest
     */
    private String computeStableId(String path, String content) {
        try {
            String input = path + "\n" + content;
            byte[] hash = MessageDigest.getInstance("SHA-256").digest(input.getBytes(StandardCharsets.UTF_8));
            // 22 URL-safe Base64 chars keep the ID short and path/URL friendly while
            // still carrying enough digest bits to make collisions negligible here.
            return Base64.getUrlEncoder().withoutPadding().encodeToString(hash).substring(0, STABLE_ID_LENGTH);
        } catch (NoSuchAlgorithmException e) {
            // Every JCA-compliant JRE must provide SHA-256, so this is effectively unreachable.
            throw new IllegalStateException("Failed to compute stable ID", e);
        }
    }

    /**
     * Computes the SHA-256 hash of the given content.
     *
     * @param content full document text
     * @return standard (non-URL-safe) Base64 encoding of the SHA-256 digest
     */
    private String computeContentHash(String content) {
        try {
            byte[] hash = MessageDigest.getInstance("SHA-256").digest(content.getBytes(StandardCharsets.UTF_8));
            return Base64.getEncoder().encodeToString(hash);
        } catch (NoSuchAlgorithmException e) {
            // See computeStableId: SHA-256 is guaranteed to be present.
            throw new IllegalStateException("Failed to compute content hash", e);
        }
    }

    /**
     * Loads every Markdown file under {@code classpath*:doc/*.md}, splits each one into
     * semantic chunks via {@link SemanticChunker}, and returns the chunks as
     * {@link Document}s with stable, reproducible IDs and per-chunk metadata
     * ({@code filename}, {@code status}, whole-document {@code hash},
     * {@code chunk_index}, {@code total_chunks}).
     *
     * <p>On an I/O failure the error is logged and whatever was loaded so far is
     * returned; this method never returns {@code null}.
     *
     * @return the list of chunk documents (possibly empty)
     */
    public List<Document> loadMds() {
        List<Document> documents = new ArrayList<>();
        try {
            Resource[] resources = resourcePatternResolver.getResources("classpath*:doc/*.md");
            for (Resource resource : resources) {
                String filename = resource.getFilename();
                if (filename == null) {
                    continue;
                }

                // 1. Read the full content (used for the hash and the stable base ID).
                //    try-with-resources fixes a stream leak: getInputStream() must be closed by the caller.
                String fullContent;
                try (InputStream in = resource.getInputStream()) {
                    fullContent = new String(in.readAllBytes(), StandardCharsets.UTF_8);
                }
                String contentHash = computeContentHash(fullContent);
                String docIdBase = computeStableId(filename, contentHash);

                // Status tag = the 3 characters just before the ".md"-sized suffix
                // (presumably a naming convention like "*-xxx.md" — TODO confirm against doc filenames).
                String status = filename.length() > 7
                        ? filename.substring(filename.length() - 7, filename.length() - 4)
                        : "unk";

                // 2. Split into chunks with the custom semantic splitter.
                List<Document> rawChunks = semanticChunker.split(resource);

                // 3. Wrap each chunk in a new Document carrying a stable, index-suffixed ID.
                for (int i = 0; i < rawChunks.size(); i++) {
                    Document rawChunk = rawChunks.get(i);
                    String chunkText = rawChunk.getText();

                    // Skip empty/blank chunks — they add noise to the vector store.
                    if (chunkText == null || chunkText.trim().isEmpty()) {
                        continue;
                    }

                    String chunkId = docIdBase + "_chunk_" + i;

                    Map<String, Object> metadata = new HashMap<>();
                    metadata.put("filename", filename);
                    metadata.put("status", status);
                    metadata.put("hash", contentHash);        // hash of the WHOLE document, not this chunk
                    metadata.put("chunk_index", i);
                    metadata.put("total_chunks", rawChunks.size());

                    documents.add(new Document(chunkId, chunkText, metadata));
                }
            }
        } catch (IOException e) {
            log.error("文档加载失败", e);
        }
        return documents;
    }
}
