"""
文本分割器
"""
import importlib
import re
from functools import lru_cache
from typing import Any, Dict, List, Optional

from langchain.docstore.document import Document
from langchain.text_splitter import (
    CharacterTextSplitter,
    MarkdownHeaderTextSplitter,
    RecursiveCharacterTextSplitter,
    SpacyTextSplitter,
)

from ..config.constants import TEXT_SPLITTER_DICT
from ..config.settings import settings


class TextSplitter:
    """Factory wrapper around langchain text splitters.

    Resolves a splitter by name (falling back to the configured default),
    builds it according to its entry in ``TEXT_SPLITTER_DICT`` (plain,
    tiktoken-based, or huggingface-tokenizer-based), and exposes helpers
    to split documents or raw text. Built splitters are cached per
    (name, chunk_size, chunk_overlap).
    """

    def __init__(self, splitter_name: Optional[str] = None, **kwargs):
        """
        Args:
            splitter_name: name of the splitter class to use; defaults to
                ``settings.text_splitter.splitter_name``.
            **kwargs: extra options, stored as-is (not used by this class).
        """
        self.splitter_name = splitter_name or settings.text_splitter.splitter_name
        self.config = TEXT_SPLITTER_DICT.get(self.splitter_name, {})
        self.kwargs = kwargs
        # Per-instance cache keyed by (name, chunk_size, chunk_overlap).
        # Replaces lru_cache on the method, which would hash `self`,
        # keep every instance alive for the cache's lifetime, and ignore
        # that the result also depends on mutable `self.config`.
        self._splitter_cache: Dict[tuple, Any] = {}

    def make_text_splitter(self, splitter_name: str, chunk_size: int, chunk_overlap: int):
        """Return a cached splitter instance for the given parameters."""
        key = (splitter_name, chunk_size, chunk_overlap)
        if key not in self._splitter_cache:
            self._splitter_cache[key] = self._build_splitter(
                splitter_name, chunk_size, chunk_overlap
            )
        return self._splitter_cache[key]

    def _build_splitter(self, splitter_name: str, chunk_size: int, chunk_overlap: int):
        """Construct a new splitter.

        Falls back to ``RecursiveCharacterTextSplitter`` on any failure so
        callers always receive a usable splitter.
        """
        try:
            # MarkdownHeaderTextSplitter has a different constructor
            # signature (no chunk_size/chunk_overlap).
            if splitter_name == "MarkdownHeaderTextSplitter":
                headers_to_split_on = self.config.get("headers_to_split_on", [
                    ("#", "head1"),
                    ("##", "head2"),
                    ("###", "head3"),
                    ("####", "head4"),
                ])
                return MarkdownHeaderTextSplitter(
                    headers_to_split_on=headers_to_split_on,
                    strip_headers=False,
                )

            # Resolve the class from langchain; unknown names fall back to
            # RecursiveCharacterTextSplitter. (The original used a bare
            # except that re-imported the very same module.)
            text_splitter_module = importlib.import_module("langchain.text_splitter")
            TextSplitterClass = getattr(
                text_splitter_module, splitter_name, RecursiveCharacterTextSplitter
            )

            source = self.config.get("source", "")

            if source == "tiktoken":
                # Chunk length measured with a tiktoken encoding.
                tokenizer_name = self.config.get("tokenizer_name_or_path", "cl100k_base")
                return TextSplitterClass.from_tiktoken_encoder(
                    encoding_name=tokenizer_name,
                    chunk_size=chunk_size,
                    chunk_overlap=chunk_overlap,
                )

            if source == "huggingface":
                # Chunk length measured with a huggingface tokenizer.
                tokenizer_name = self.config.get("tokenizer_name_or_path", "gpt2")
                if tokenizer_name == "gpt2":
                    from transformers import GPT2TokenizerFast
                    tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
                else:
                    from transformers import AutoTokenizer
                    tokenizer = AutoTokenizer.from_pretrained(
                        tokenizer_name,
                        trust_remote_code=True,
                    )
                return TextSplitterClass.from_huggingface_tokenizer(
                    tokenizer=tokenizer,
                    chunk_size=chunk_size,
                    chunk_overlap=chunk_overlap,
                )

            # Default: plain character-count construction.
            return TextSplitterClass(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
            )

        except Exception as e:
            print(f"创建文本分割器失败: {e}")
            # Last-resort fallback.
            return RecursiveCharacterTextSplitter(
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
            )

    def split_documents(
        self,
        documents: List[Document],
        chunk_size: Optional[int] = None,
        chunk_overlap: Optional[int] = None,
        splitter_name: Optional[str] = None,
    ) -> List[Document]:
        """Split a list of documents into chunks.

        Omitted arguments fall back to the configured chunk size/overlap
        and this instance's splitter name. Empty input yields ``[]``.
        """
        chunk_size = chunk_size or settings.text_splitter.chunk_size
        chunk_overlap = chunk_overlap or settings.text_splitter.chunk_overlap
        splitter_name = splitter_name or self.splitter_name

        if not documents:
            return []

        text_splitter = self.make_text_splitter(splitter_name, chunk_size, chunk_overlap)

        if splitter_name == "MarkdownHeaderTextSplitter":
            # MarkdownHeaderTextSplitter operates on raw text and may
            # return Documents or plain strings; normalize to Documents.
            # NOTE(review): only documents[0] is split here, matching the
            # original implementation — confirm callers pass a single doc.
            docs = text_splitter.split_text(documents[0].page_content)
            split_docs = []
            for doc in docs:
                if isinstance(doc, Document):
                    split_docs.append(doc)
                else:
                    split_docs.append(Document(
                        page_content=doc,
                        metadata=documents[0].metadata.copy(),
                    ))
            return split_docs

        return text_splitter.split_documents(documents)

    def split_text(
        self,
        text: str,
        chunk_size: Optional[int] = None,
        chunk_overlap: Optional[int] = None,
        splitter_name: Optional[str] = None,
    ) -> List[str]:
        """Split a single text into chunks (empty input yields [])."""
        chunk_size = chunk_size or settings.text_splitter.chunk_size
        chunk_overlap = chunk_overlap or settings.text_splitter.chunk_overlap
        splitter_name = splitter_name or self.splitter_name

        if not text:
            return []

        text_splitter = self.make_text_splitter(splitter_name, chunk_size, chunk_overlap)
        return text_splitter.split_text(text)


class ChineseTextSplitter(CharacterTextSplitter):
    """Chinese sentence-aware text splitter.

    Splits text at Chinese sentence-ending punctuation, then re-splits
    any sentence longer than ``sentence_size`` on minor punctuation.
    """

    def __init__(self, pdf: bool = False, sentence_size: int = 250, **kwargs):
        """
        Args:
            pdf: when True, pre-clean PDF-extracted text (collapse runs of
                newlines and whitespace) before splitting.
            sentence_size: soft upper bound on sentence length; longer
                sentences are split again by :meth:`_split_long_sentence`.
            **kwargs: forwarded to ``CharacterTextSplitter``.
        """
        super().__init__(**kwargs)
        self.pdf = pdf
        self.sentence_size = sentence_size

    def split_text(self, text: str) -> List[str]:
        """Split *text* into sentence-sized chunks."""
        if self.pdf:
            # Normalize PDF extraction artifacts: collapse 3+ newlines,
            # squash every whitespace char to a space, drop "\n\n".
            text = re.sub(r"\n{3,}", "\n", text)
            # fixed: r"\s" — the original "\s" was an invalid escape
            # sequence (DeprecationWarning / SyntaxWarning on 3.12+).
            text = re.sub(r"\s", " ", text)
            text = text.replace("\n\n", "")

        # Sentence boundary: a fullwidth terminator optionally followed by
        # closing quotes, OR a lookahead at opening quotes / end of input.
        # NOTE(review): the adjacent string literals below concatenate into
        # one pattern; the straight quotes in the character classes look
        # like mangled curly quotes — confirm against the upstream source.
        sent_sep_pattern = re.compile(
            '([﹒﹔﹖﹗．。！？]["'"」』]{0,2}|(?=["'"「『]{1,2}|$))'
        )

        sent_list: List[str] = []
        for ele in sent_sep_pattern.split(text):
            if sent_sep_pattern.match(ele) and sent_list:
                # Separator fragment: glue it onto the preceding sentence.
                sent_list[-1] += ele
            elif ele:
                sent_list.append(ele)

        # Re-split any sentence exceeding the soft length limit.
        result: List[str] = []
        for sent in sent_list:
            if len(sent) > self.sentence_size:
                result.extend(self._split_long_sentence(sent))
            else:
                result.append(sent)

        return result

    def _split_long_sentence(self, sentence: str) -> List[str]:
        """Break an over-long sentence on minor Chinese punctuation."""
        separators = ['，', '。', '！', '？', '；', '：', '、']
        pattern = '|'.join(map(re.escape, separators))

        # Capturing split keeps the separators so they can be re-attached.
        parts = re.split(f'({pattern})', sentence)
        result: List[str] = []
        current_part = ""

        for part in parts:
            if part in separators:
                current_part += part
                # Emit once the accumulated piece reaches the size limit.
                if len(current_part) >= self.sentence_size:
                    result.append(current_part)
                    current_part = ""
            else:
                current_part += part

        if current_part:
            result.append(current_part)

        return result