"""
文档处理模块
实现文档加载、解析和分块等功能
"""

import os
from typing import List, Dict, Optional, Union
from pathlib import Path
import logging
from dataclasses import dataclass
import asyncio

import docx
import fitz  # PyMuPDF
from bs4 import BeautifulSoup
import markdown

logger = logging.getLogger(__name__)

@dataclass
class DocumentChunk:
    """A single chunk of a document's text plus its per-chunk metadata."""
    text: str       # chunk content (a slice of the cleaned document text)
    metadata: Dict  # copy of the file metadata plus chunk_index/chunk_start/chunk_end
    
    
@dataclass
class Document:
    """A loaded document: full text, source-file metadata, and optional chunks."""
    content: str   # full document text, extracted from the source file
    metadata: Dict # file_name/file_path/file_type/file_size/created_time/modified_time
    # Populated by DocumentProcessor.process_document; None until then.
    chunks: Optional[List[DocumentChunk]] = None

class DocumentProcessor:
    """Loads documents in several formats and splits them into overlapping chunks.

    Supported extensions: .txt, .pdf, .doc/.docx, .md, .html.

    Config keys (all optional):
        chunk_size:     target chunk length in characters (default 500)
        chunk_overlap:  characters shared between consecutive chunks (default 50)
        min_chunk_size: chunks shorter than this are widened to a full
                        window instead of being emitted (default 100)
    """

    def __init__(self, config: Optional[Dict] = None):
        self.config = config or {}
        self.chunk_size = self.config.get("chunk_size", 500)
        self.chunk_overlap = self.config.get("chunk_overlap", 50)
        self.min_chunk_size = self.config.get("min_chunk_size", 100)

    async def load_document(self, file_path: Union[str, Path]) -> Document:
        """Load a document from disk and attach file-system metadata.

        Raises:
            FileNotFoundError: if the path does not exist.
            ValueError: if the file extension is unsupported.
        """
        file_path = Path(file_path)

        if not file_path.exists():
            raise FileNotFoundError(f"文件不存在: {file_path}")

        # One stat() call instead of three os.path.get* calls: fewer syscalls
        # and size/timestamps come from a single consistent snapshot.
        stat = file_path.stat()
        metadata = {
            "file_name": file_path.name,
            "file_path": str(file_path),
            "file_type": file_path.suffix.lower(),
            "file_size": stat.st_size,
            # NOTE: st_ctime is creation time on Windows but metadata-change
            # time on POSIX systems.
            "created_time": stat.st_ctime,
            "modified_time": stat.st_mtime,
        }

        content = await self._load_file_content(file_path)
        return Document(content=content, metadata=metadata)

    async def _load_file_content(self, file_path: Path) -> str:
        """Dispatch to the loader for the file's extension and return its text."""
        file_type = file_path.suffix.lower()
        loaders = {
            ".txt": self._load_text,
            ".pdf": self._load_pdf,
            ".doc": self._load_word,
            ".docx": self._load_word,
            ".md": self._load_markdown,
            ".html": self._load_html,
        }

        try:
            loader = loaders.get(file_type)
            if loader is None:
                raise ValueError(f"不支持的文件类型: {file_type}")
            return await loader(file_path)
        except Exception as e:
            logger.error(f"加载文件失败 {file_path}: {str(e)}")
            raise

    @staticmethod
    async def _run_blocking(func):
        """Run blocking file/parsing work in the default thread pool so the
        event loop is not stalled.

        (The previous ``async with asyncio.Lock()`` in each loader created a
        brand-new lock per call, so it synchronized nothing, while the
        blocking I/O still ran on the event-loop thread.)
        """
        return await asyncio.get_running_loop().run_in_executor(None, func)

    async def _load_text(self, file_path: Path) -> str:
        """Load a plain-text file (assumed UTF-8)."""
        def read() -> str:
            return file_path.read_text(encoding="utf-8")
        return await self._run_blocking(read)

    async def _load_pdf(self, file_path: Path) -> str:
        """Extract the text of every page of a PDF."""
        def read() -> str:
            # Context manager closes the document; the original leaked the
            # fitz handle.
            with fitz.open(file_path) as doc:
                return "".join(page.get_text() for page in doc)
        return await self._run_blocking(read)

    async def _load_word(self, file_path: Path) -> str:
        """Extract paragraph text from a Word document.

        NOTE(review): python-docx only parses .docx; legacy .doc files routed
        here will fail inside docx.Document — confirm whether real .doc
        support is required.
        """
        def read() -> str:
            document = docx.Document(file_path)
            return "\n".join(p.text for p in document.paragraphs)
        return await self._run_blocking(read)

    async def _load_markdown(self, file_path: Path) -> str:
        """Render Markdown to HTML, then strip the tags to plain text."""
        def read() -> str:
            md_text = file_path.read_text(encoding="utf-8")
            html = markdown.markdown(md_text)
            return BeautifulSoup(html, "html.parser").get_text()
        return await self._run_blocking(read)

    async def _load_html(self, file_path: Path) -> str:
        """Strip tags from an HTML file and return its text content."""
        def read() -> str:
            html = file_path.read_text(encoding="utf-8")
            return BeautifulSoup(html, "html.parser").get_text()
        return await self._run_blocking(read)

    def process_document(self, document: Document) -> Document:
        """Clean the document's text and split it into chunks.

        Mutates ``document`` in place (content and chunks) and returns it.
        """
        document.content = self._clean_text(document.content)
        document.chunks = self._split_text(document.content, document.metadata)
        return document

    def _clean_text(self, text: str) -> str:
        """Collapse whitespace runs to single spaces and drop NUL bytes."""
        text = " ".join(text.split())
        return text.replace("\x00", "")

    def _split_text(self, text: str, metadata: Dict) -> List[DocumentChunk]:
        """Split text into chunks of at most ``chunk_size`` characters.

        Prefers to cut at the last Chinese sentence terminator (。？！) inside
        the window; consecutive chunks overlap by ``chunk_overlap`` characters.
        """
        chunks: List[DocumentChunk] = []
        text_len = len(text)
        start = 0

        while start < text_len:
            # Clamp to the text length so chunk_end metadata is never past
            # the end (the original could report start + chunk_size).
            end = min(start + self.chunk_size, text_len)

            if end < text_len:
                # Prefer to end at the latest sentence terminator in the window.
                sentence_end = max(
                    text.rfind("。", start, end),
                    text.rfind("？", start, end),
                    text.rfind("！", start, end),
                )
                if sentence_end > start:
                    end = sentence_end + 1  # include the terminator

                # If cutting at the sentence boundary made the chunk too
                # small, fall back to a full-size window.  (The original
                # `continue` here never advanced `start` and looped forever.)
                if end - start < self.min_chunk_size:
                    end = min(start + self.chunk_size, text_len)

            chunk_metadata = {
                **metadata,
                "chunk_index": len(chunks),
                "chunk_start": start,
                "chunk_end": end,
            }
            chunks.append(DocumentChunk(text=text[start:end], metadata=chunk_metadata))

            # Step forward with overlap, but always make progress even when
            # chunk_overlap >= the length of the chunk just emitted.
            next_start = end - self.chunk_overlap
            start = next_start if next_start > start else end

        return chunks