package com.example.deepseek.service.impl;

import com.example.deepseek.model.DocumentChunk;
import com.example.deepseek.service.EmbeddingService;
import com.example.deepseek.service.PdfProcessingService;
import com.example.deepseek.service.VectorStoreService;

import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.text.PDFTextStripper;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import java.io.IOException;
import java.io.InputStream;
import java.util.*;

@Service
public class PdfProcessingServiceImpl implements PdfProcessingService {

    /** Maximum chunk length in characters (property {@code pdf.chunk.size}, default 400). */
    @Value("${pdf.chunk.size:400}")
    private int chunkSize;

    /** Characters of trailing context carried into the next chunk (property {@code pdf.chunk.overlap}, default 40). */
    @Value("${pdf.chunk.overlap:40}")
    private int chunkOverlap;

    private final VectorStoreService vectorStore;
    private final EmbeddingService embeddingService;

    public PdfProcessingServiceImpl(VectorStoreService vectorStore, EmbeddingService embeddingService) {
        this.vectorStore = vectorStore;
        this.embeddingService = embeddingService;
    }

    /**
     * Extracts the text of an uploaded PDF, splits it into overlapping chunks,
     * embeds every chunk and stores ids/contents/embeddings/metadata in the
     * vector store under a freshly generated document id.
     *
     * @param file uploaded PDF document
     * @return human-readable summary (file name, size in KB, chunk count)
     * @throws IOException if the upload cannot be read or the PDF cannot be parsed
     */
    @Override
    public String processPdfDocument(MultipartFile file) throws IOException {
        String fileName = file.getOriginalFilename();
        // getOriginalFilename() may be null or empty depending on the client.
        if (fileName == null || fileName.isBlank()) {
            fileName = "unknown.pdf";
        }
        long fileSize = file.getSize();

        // Close the upload stream ourselves — PDDocument.load does not own it.
        String fullText;
        try (InputStream in = file.getInputStream()) {
            fullText = extractTextFromPdf(in);
        }

        List<String> chunks = splitTextIntoChunks(fullText);

        // UUID avoids id collisions when two uploads arrive in the same millisecond.
        String docId = "doc_" + UUID.randomUUID();

        List<String> ids = new ArrayList<>(chunks.size());
        List<String> contents = new ArrayList<>(chunks.size());
        List<float[]> embeddings = new ArrayList<>(chunks.size());
        List<Map<String, Object>> metadatas = new ArrayList<>(chunks.size());

        for (int i = 0; i < chunks.size(); i++) {
            String chunk = chunks.get(i);

            ids.add(docId + "_chunk_" + i);
            contents.add(chunk);
            embeddings.add(embeddingService.generateEmbedding(chunk));

            Map<String, Object> metadata = new HashMap<>();
            metadata.put("source", fileName);
            metadata.put("doc_id", docId);
            metadata.put("chunk_index", i);
            metadatas.add(metadata);
        }

        vectorStore.addDocuments(ids, contents, embeddings, metadatas);

        return String.format("成功处理PDF文件: %s (%.2f KB), 创建了 %d 个文本块",
                fileName, fileSize / 1024.0, chunks.size());
    }

    /**
     * Finds the chunks most relevant to a question via hybrid (keyword + vector) search.
     *
     * @param question   natural-language query
     * @param maxResults maximum number of chunks to return
     * @return relevant chunks, best match first (ordering delegated to the store)
     */
    @Override
    public List<DocumentChunk> searchRelevantChunks(String question, int maxResults) {
        float[] questionEmbedding = embeddingService.generateEmbedding(question);
        return vectorStore.hybridSearch(question, questionEmbedding, maxResults);
    }

    /**
     * Removes every stored document from the vector store.
     *
     * @return always 1; the underlying store does not report how many entries
     *         were removed — NOTE(review): consider changing the contract to
     *         return the actual count if the store can supply it
     */
    @Override
    public int clearAllDocuments() {
        vectorStore.clearAll();
        return 1;
    }

    /**
     * Extracts plain text from a PDF stream. The PDDocument is closed here;
     * the input stream is owned (and closed) by the caller.
     */
    private String extractTextFromPdf(InputStream inputStream) throws IOException {
        try (PDDocument document = PDDocument.load(inputStream)) {
            return new PDFTextStripper().getText(document);
        }
    }

    /**
     * Splits text into chunks of at most {@code chunkSize} characters, preferring
     * paragraph ("\n\n") boundaries and carrying {@code chunkOverlap} characters
     * of trailing context into the next chunk.
     *
     * <p>Fixes over the previous version: a paragraph longer than {@code chunkSize}
     * is hard-split instead of emitted as one oversized chunk, the overlap is
     * applied in characters (not a words-divided-by-ten guess), and blank
     * paragraphs are skipped.
     */
    private List<String> splitTextIntoChunks(String text) {
        List<String> chunks = new ArrayList<>();
        if (text == null || text.isBlank()) {
            return chunks;
        }

        // Clamp so a misconfigured overlap can never stall forward progress.
        int maxLen = Math.max(1, chunkSize);
        int overlap = Math.max(0, Math.min(chunkOverlap, maxLen / 2));

        StringBuilder current = new StringBuilder();
        int seedLength = 0; // chars in `current` that are overlap carry-over only

        for (String paragraph : text.split("\\n\\n")) {
            if (paragraph.isBlank()) {
                continue;
            }
            for (String piece : hardSplit(paragraph, maxLen)) {
                // Flush only if the chunk holds fresh content, never just the overlap seed.
                if (current.length() + piece.length() > maxLen && current.length() > seedLength) {
                    seedLength = flush(chunks, current, overlap);
                }
                current.append(piece).append("\n\n");
                if (current.length() >= maxLen) {
                    seedLength = flush(chunks, current, overlap);
                }
            }
        }

        // Emit the tail only if it contains more than the carried-over overlap.
        if (current.length() > seedLength) {
            chunks.add(current.toString());
        }
        return chunks;
    }

    /**
     * Saves the accumulated chunk and re-seeds the builder with its last
     * {@code overlap} characters.
     *
     * @return the length of the overlap seed left in {@code current}
     */
    private int flush(List<String> chunks, StringBuilder current, int overlap) {
        String chunk = current.toString();
        chunks.add(chunk);
        current.setLength(0);
        if (overlap > 0 && chunk.length() > overlap) {
            current.append(chunk, chunk.length() - overlap, chunk.length());
        }
        return current.length();
    }

    /** Hard-splits an oversized paragraph into pieces of at most {@code max} characters. */
    private static List<String> hardSplit(String paragraph, int max) {
        if (paragraph.length() <= max) {
            return List.of(paragraph);
        }
        List<String> pieces = new ArrayList<>();
        for (int start = 0; start < paragraph.length(); start += max) {
            pieces.add(paragraph.substring(start, Math.min(start + max, paragraph.length())));
        }
        return pieces;
    }
}