"""
文档处理模块
处理各种格式的文档，提取文本并分块
"""
import os
from typing import List, Dict, Any, Optional
from pathlib import Path
from pydantic import BaseModel, Field
import logging

# Configure module-level logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class DocumentChunk(BaseModel):
    """One chunk of a document's text plus the metadata attached to it.

    Fields:
        content: The chunk's text.
        metadata: Arbitrary per-chunk metadata (e.g. source file name/path/type);
            defaults to an empty dict per instance via ``default_factory``.
        chunk_id: Optional identifier for this chunk.
        document_id: Optional identifier of the document this chunk came from.
    """
    content: str
    metadata: Dict[str, Any] = Field(default_factory=dict)
    chunk_id: Optional[str] = None
    document_id: Optional[str] = None

class DocumentProcessor:
    """Load documents in several formats and split their text into chunks."""

    def __init__(self, chunk_size: int = 1000, chunk_overlap: int = 200):
        """
        Initialize the document processor.

        Args:
            chunk_size: Maximum size of each text chunk, in characters.
            chunk_overlap: Number of characters (rounded to whole sentences)
                carried over from the end of one chunk into the next.

        Raises:
            ValueError: If chunk_size is not positive or chunk_overlap is negative.
        """
        if chunk_size <= 0:
            raise ValueError(f"chunk_size must be positive, got {chunk_size}")
        if chunk_overlap < 0:
            raise ValueError(f"chunk_overlap must be non-negative, got {chunk_overlap}")
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

    def load_document(self, file_path: str) -> str:
        """
        Load a document's text content.

        Args:
            file_path: Path to the document.

        Returns:
            The document's full text.

        Raises:
            FileNotFoundError: If the file does not exist.
            ValueError: If the file extension is not supported.
        """
        path = Path(file_path)
        if not path.exists():
            raise FileNotFoundError(f"文件不存在: {path}")

        # Dispatch on the (lower-cased) file extension.
        loaders = {
            '.txt': self._load_txt,
            '.pdf': self._load_pdf,
            '.docx': self._load_docx,
            '.doc': self._load_docx,
            '.pptx': self._load_pptx,
        }
        ext = path.suffix.lower()
        loader = loaders.get(ext)

        try:
            if loader is None:
                # Raised inside the try so unsupported formats are logged
                # the same way as loader failures (matches prior behavior).
                raise ValueError(f"不支持的文件格式: {ext}")
            return loader(path)
        except Exception as e:
            logger.error(f"加载文档 {path} 时出错: {str(e)}")
            raise

    def _load_txt(self, file_path: Path) -> str:
        """Load a UTF-8 plain-text file."""
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()

    def _load_pdf(self, file_path: Path) -> str:
        """Load a PDF file, concatenating the extracted text of every page."""
        from pypdf import PdfReader

        reader = PdfReader(file_path)
        # One newline per page keeps page boundaries visible in the output.
        return "".join(page.extract_text() + "\n" for page in reader.pages)

    def _load_docx(self, file_path: Path) -> str:
        """Load a Word document, joining its non-empty paragraphs."""
        from docx import Document

        doc = Document(file_path)
        return "\n".join(para.text for para in doc.paragraphs if para.text)

    def _load_pptx(self, file_path: Path) -> str:
        """Load a PowerPoint file, collecting text from every shape on every slide."""
        from pptx import Presentation

        prs = Presentation(file_path)
        texts = []
        for slide in prs.slides:
            for shape in slide.shapes:
                # Not all shapes (e.g. pictures) carry text.
                if hasattr(shape, "text"):
                    texts.append(shape.text)
        return "\n".join(texts)

    def chunk_text(self, text: str, metadata: Dict[str, Any] = None) -> List[DocumentChunk]:
        """
        Split text into overlapping chunks of whole sentences.

        Sentences are accumulated until adding the next one would exceed
        ``chunk_size`` characters; the trailing sentences totalling at most
        ``chunk_overlap`` characters are carried into the next chunk.

        Args:
            text: The text to split.
            metadata: Metadata copied onto every chunk.

        Returns:
            A list of document chunks.
        """
        if metadata is None:
            metadata = {}

        import re

        # Sentence boundaries: Western sentence enders (. ! ?) followed by
        # whitespace, or CJK full-width enders (。！？), which are not
        # followed by a space. The zero-width CJK alternative relies on
        # re.split's support for empty matches (Python 3.7+). Pure-ASCII
        # text splits exactly as before.
        sentences = re.split(r'(?<=[.!?])\s+|(?<=[。！？])', text)

        chunks: List[DocumentChunk] = []
        current_chunk: List[str] = []
        current_length = 0

        for sentence in sentences:
            sentence = sentence.strip()
            if not sentence:
                continue

            sentence_length = len(sentence)

            # Flush the current chunk when adding this sentence would push
            # it past the configured size (a single over-long sentence still
            # becomes its own chunk).
            if current_chunk and (current_length + sentence_length > self.chunk_size):
                chunks.append(DocumentChunk(
                    content=" ".join(current_chunk),
                    metadata=metadata.copy(),
                ))
                # BUGFIX: the previous code measured the overlap in
                # *sentences* (len(current_chunk) - chunk_overlap), which
                # with the default overlap of 200 never trimmed anything and
                # duplicated nearly the whole chunk. Measure in characters.
                current_chunk = self._overlap_tail(current_chunk)
                current_length = sum(len(s) + 1 for s in current_chunk)

            current_chunk.append(sentence)
            current_length += sentence_length + 1  # +1 for the joining space

        # Emit whatever remains as the final chunk.
        if current_chunk:
            chunks.append(DocumentChunk(
                content=" ".join(current_chunk),
                metadata=metadata.copy(),
            ))

        return chunks

    def _overlap_tail(self, sentences: List[str]) -> List[str]:
        """Return the trailing sentences totalling at most chunk_overlap characters."""
        tail: List[str] = []
        total = 0
        for s in reversed(sentences):
            if total + len(s) > self.chunk_overlap:
                break
            tail.append(s)
            total += len(s) + 1  # +1 for the joining space
        tail.reverse()
        return tail

    def process_document(self, file_path: str, metadata: Dict[str, Any] = None) -> List[DocumentChunk]:
        """
        Load a document and return its chunks.

        Args:
            file_path: Path to the document.
            metadata: Extra metadata merged onto every chunk; caller-provided
                keys override the generated file metadata.

        Returns:
            A list of document chunks.

        Raises:
            FileNotFoundError: If the file does not exist.
            ValueError: If the file extension is not supported.
        """
        if metadata is None:
            metadata = {}

        # Describe the source file in every chunk's metadata.
        file_metadata = {
            "source": os.path.basename(file_path),
            "file_path": str(file_path),
            "file_type": os.path.splitext(file_path)[1].lower().lstrip('.'),
        }
        file_metadata.update(metadata)

        text = self.load_document(file_path)
        return self.chunk_text(text, file_metadata)
