import logging
from typing import List, Optional, Dict, Any

# Try to import the document-splitting libraries; fall back to a minimal
# local implementation when langchain is not installed.
try:
    from langchain.schema import Document
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    LANGCHAIN_AVAILABLE = True
except ImportError:
    logging.warning("Langchain文本分割库未安装，将使用基础分割功能")
    LANGCHAIN_AVAILABLE = False

    class Document:
        """Minimal stand-in for langchain's Document.

        Carries the same two attributes the rest of this module relies on:
        ``page_content`` (the text) and ``metadata`` (a dict, never None).
        """

        def __init__(self, page_content: str, metadata: Optional[Dict[str, Any]] = None):
            self.page_content = page_content
            # `metadata or {}` gives each instance its own fresh dict when
            # no metadata is supplied (None default avoids a shared mutable).
            self.metadata = metadata or {}

async def split_document(documents: List[Document], 
                         chunk_size: int = 1000, 
                         chunk_overlap: int = 200) -> List[Document]:
    """Split documents into smaller chunks.

    Args:
        documents: the documents to split.
        chunk_size: maximum number of characters per chunk.
        chunk_overlap: number of characters shared by consecutive chunks.

    Returns:
        The resulting document chunks; on failure, the original documents
        are returned unchanged so the pipeline can keep going.
    """
    if not documents:
        return []

    try:
        if LANGCHAIN_AVAILABLE:
            # Prefer langchain's recursive splitter; separators are ordered
            # from paragraph breaks down to single characters, including
            # CJK sentence punctuation.
            splitter = RecursiveCharacterTextSplitter(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                separators=["\n\n", "\n", "。", "！", "？", "，", " ", ""]
            )
            return splitter.split_documents(documents)

        return await split_document_fallback(documents, chunk_size, chunk_overlap)

    except Exception as e:
        logging.error(f"分割文档失败: {str(e)}")
        # Best-effort contract: never let splitting take the caller down.
        return documents
        
async def split_document_fallback(documents: List["Document"], 
                                 chunk_size: int = 1000, 
                                 chunk_overlap: int = 200) -> List["Document"]:
    """Basic document splitting used when langchain is unavailable.

    Args:
        documents: the documents to split.
        chunk_size: maximum number of characters per chunk.
        chunk_overlap: number of characters shared by consecutive chunks.

    Returns:
        The split chunks; documents no longer than ``chunk_size`` are
        passed through unchanged (same object, not a copy).
    """
    result: List["Document"] = []

    for doc in documents:
        content = doc.page_content
        metadata = doc.metadata

        # Short documents need no splitting.
        if len(content) <= chunk_size:
            result.append(doc)
            continue

        start = 0
        while start < len(content):
            end = min(start + chunk_size, len(content))

            # For non-final chunks, prefer to cut at a sentence boundary
            # (CJK and ASCII terminators, then newline).
            if end < len(content):
                for sep in ["。", "！", "？", "\n", ".", "!", "?"]:
                    pos = content.rfind(sep, start, end)
                    if pos > start:
                        end = pos + 1
                        break

            result.append(Document(
                page_content=content[start:end],
                metadata=metadata.copy()
            ))

            # Done once this chunk reaches the end of the content.
            if end >= len(content):
                break

            # Step back by the overlap, but always advance past the previous
            # start: the original `start = end - chunk_overlap` could stall
            # (or go negative and drop the tail) whenever
            # end - chunk_overlap <= start, causing an infinite loop that
            # re-emitted the same chunk forever.
            start = max(end - chunk_overlap, start + 1)

    return result