# Use the newer langchain_text_splitters module
from langchain_text_splitters import RecursiveCharacterTextSplitter, TokenTextSplitter
from langchain_experimental.text_splitter import SemanticChunker
from langchain_core.documents import Document
from ..core.config import settings
from ..core.logger import logger


def split_documents(documents):
    """Split documents into chunks with a recursive character splitter.

    Chunk size and overlap are read from settings, defaulting to 800/100.
    """
    chunk_size = getattr(settings, "CHUNK_SIZE", 800)
    chunk_overlap = getattr(settings, "CHUNK_OVERLAP", 100)
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return text_splitter.split_documents(documents)

def should_use_token_splitting(documents):
    """Decide whether token-based chunking should be used.

    Structured content (tabular data, markup, code-like text) tends to
    chunk better on token boundaries than on character boundaries.
    Only the first 10 documents are inspected, for efficiency.

    Returns True if any inspected document looks structured, else False.
    """
    if not documents:
        return False

    structured_types = {'csv', 'json', 'xml'}
    for doc in documents[:10]:
        # Signal 1: an explicitly structured file type in the metadata.
        if 'file_type' in doc.metadata:
            if doc.metadata['file_type'].lower() in structured_types:
                return True

        # Signal 2: structured-looking patterns in the first 500 characters.
        sample = doc.page_content[:500]
        looks_like_json = '{' in sample and '}' in sample and ':' in sample
        looks_like_markup = '<' in sample and '>' in sample
        looks_like_kv_pairs = ',' in sample and '=' in sample
        if looks_like_json or looks_like_markup or looks_like_kv_pairs:
            return True

    return False

def split_documents_improved(documents, embedding_model=None):
    """Split documents into chunks, choosing a strategy per content type.

    Strategy selection:
      1. Token-based chunking when the documents look structured
         (see should_use_token_splitting).
      2. Semantic chunking when an embedding model is supplied, falling
         back to character chunking if the chunker fails to initialize.
      3. Recursive character chunking otherwise.

    Args:
        documents: langchain Document objects to split.
        embedding_model: optional embedding model enabling semantic chunking.

    Returns:
        List of chunk Documents. Each chunk carries its parent document's
        metadata; key fields missing on a chunk are backfilled from the
        correct parent.
    """
    logger.info(f"开始切分文档，共 {len(documents)} 个文档")

    # 判断是否使用Token分块
    use_token_splitting = should_use_token_splitting(documents)

    if use_token_splitting:
        logger.info("使用Token分块策略")
        splitter = TokenTextSplitter(
            chunk_size=getattr(settings, "TOKEN_CHUNK_SIZE", 512),
            chunk_overlap=getattr(settings, "TOKEN_CHUNK_OVERLAP", 80)
        )
    elif embedding_model:
        # 如果提供了嵌入模型，使用语义分块
        logger.info("使用语义分块策略")
        try:
            splitter = SemanticChunker(
                embedding_model,
                breakpoint_threshold_type="percentile"
            )
        except Exception as e:
            logger.warning(f"语义分块初始化失败: {e}，回退到字符分块")
            # Fall back to the default character splitter.
            splitter = RecursiveCharacterTextSplitter(
                chunk_size=getattr(settings, "CHUNK_SIZE", 512),
                chunk_overlap=getattr(settings, "CHUNK_OVERLAP", 80)
            )
    else:
        # 默认使用字符分块
        logger.info("使用字符分块策略")
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=getattr(settings, "CHUNK_SIZE", 512),
            chunk_overlap=getattr(settings, "CHUNK_OVERLAP", 80)
        )

    # Split one parent document at a time so each chunk's parent is known.
    # BUGFIX: the previous code mapped chunk index i to
    # documents[min(i, len(documents) - 1)], which associates most chunks
    # with the wrong parent and could backfill foreign metadata.
    key_fields = ['source', 'filename', 'company_name', 'source_path', 'file_type', 'filetype']
    split_docs = []
    for parent in documents:
        chunks = splitter.split_documents([parent])
        parent_meta = getattr(parent, 'metadata', None) or {}
        for chunk in chunks:
            # Guarantee every chunk has a metadata dict.
            if not hasattr(chunk, 'metadata') or chunk.metadata is None:
                chunk.metadata = {}
            # Backfill key fields that are missing or falsy on the chunk
            # from the chunk's own parent document.
            for field in key_fields:
                if not chunk.metadata.get(field) and field in parent_meta:
                    chunk.metadata[field] = parent_meta[field]
        split_docs.extend(chunks)

    logger.info(f"文档切分完成，共生成 {len(split_docs)} 个文本块")
    return split_docs

# NOTE(review): everything below this line appears to be a second module
# concatenated into this file (it had its own shebang/encoding header and
# import block). Its split_documents_improved redefines — and therefore
# shadows — the implementation above; the two should be reconciled.

"""
文档分割器模块
用于将长文档分割成较小的块，便于向量化和检索
"""

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
from typing import List, Optional, Any
from ..core.config import settings
from ..core.logger import logger


def split_documents_improved(
    documents: List[Document], 
    embedding_model: Optional[Any] = None
) -> List[Document]:
    """Split documents into smaller chunks while preserving metadata.

    Args:
        documents: documents to split.
        embedding_model: accepted for API compatibility; not used by this
            implementation (splitting is purely character-based).

    Returns:
        Chunk documents, each carrying a copy of its parent's metadata
        plus 'chunk_index' and 'total_chunks' bookkeeping fields.
    """
    logger.info(f"开始分割 {len(documents)} 个文档...")

    # Character-based splitter configured from project settings.
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=settings.CHUNK_SIZE,
        chunk_overlap=settings.CHUNK_OVERLAP,
        length_function=len,
        is_separator_regex=False,
    )

    split_docs: List[Document] = []
    for source_doc in documents:
        pieces = splitter.split_text(source_doc.page_content)
        total = len(pieces)
        for index, piece in enumerate(pieces):
            # Copy the parent metadata so chunks never share a dict,
            # then record this chunk's position within its parent.
            meta = source_doc.metadata.copy()
            meta['chunk_index'] = index
            meta['total_chunks'] = total
            split_docs.append(Document(page_content=piece, metadata=meta))

    logger.info(f"文档分割完成，共创建 {len(split_docs)} 个文本块")
    return split_docs
