import re
from typing import List, Any

from langchain_text_splitters import RecursiveCharacterTextSplitter
from llama_index.core.extractors import SummaryExtractor
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SemanticSplitterNodeParser, LangchainNodeParser
from llama_index.core.node_parser.text.utils import split_by_phrase_regex
from llama_index.core.schema import TransformComponent, BaseNode, Document
from loguru import logger

from ..api.knowledge import KnowledgeBaseSettings
from ..config.resource import get_embedding, get_llm


def clean_single_text(text: str) -> str:
    """Clean a single piece of text.

    Collapses whitespace runs into single spaces, removes characters outside
    the allowed set (CJK ideographs, ASCII letters/digits, whitespace and
    common punctuation), collapses repeated punctuation, and trims the result.

    Args:
        text: Raw input text.

    Returns:
        The cleaned text.
    """
    # Collapse any run of whitespace into a single space.
    text = re.sub(r'\s+', ' ', text)
    # Remove special characters (keep CJK, latin letters, digits, common punctuation).
    # NOTE: the previous pattern was broken — an unescaped ']' terminated the
    # character class early and the quote characters split the literal in two,
    # so it matched almost nothing. '[' and ']' are now escaped and both quote
    # characters are included in the class.
    text = re.sub(r'[^\u4e00-\u9fa5a-zA-Z0-9\s.,;:!?()\[\]{}"\'—-]', '', text)
    # Collapse repeated punctuation (e.g. "!!!" -> "!").
    text = re.sub(r'([.,;:!?])\1+', r'\1', text)
    # Trim leading/trailing whitespace.
    return text.strip()


class CleanTextTransform(TransformComponent):
    """Ingestion transform that normalizes the text content of each node."""

    def __call__(self, nodes: List[BaseNode], **kwargs: Any) -> List[BaseNode]:
        """Clean every node's content in place and mark it as cleaned.

        Args:
            nodes: Nodes whose text content should be cleaned.

        Returns:
            The same list of nodes, mutated in place.

        Raises:
            Exception: any failure is logged with a traceback and re-raised.
        """
        try:
            logger.info(f"开始清洗 {len(nodes)} 个文档")
            for node in nodes:
                raw_text = node.get_content()
                # Raw content can be arbitrarily large; log it at DEBUG
                # (not INFO) so normal runs are not flooded per node.
                logger.debug(f"原始文本：{raw_text}")
                node.set_content(clean_single_text(raw_text))
                # Flag the node so downstream steps can tell it was cleaned.
                node.metadata["cleaned"] = True
            logger.info("文本清洗完成")
            return nodes

        except Exception as e:
            logger.exception(f"文本清洗失败: {e}")
            raise

    async def acall(self, nodes: List[BaseNode], **kwargs: Any) -> List[BaseNode]:
        """Async entry point; cleaning is CPU-bound, so delegate to __call__."""
        return self.__call__(nodes, **kwargs)


class DataTransformer:
    """Builds the ingestion pipeline used to process documents."""

    def __init__(self, base_setting: KnowledgeBaseSettings):
        self._base_setting = base_setting

    def create_pipeline(self, documents: List[Document]) -> IngestionPipeline:
        """Assemble an ``IngestionPipeline`` for the given documents.

        The pipeline always begins with text cleaning; the splitter is picked
        from the knowledge-base settings, and a summary extractor is appended
        when the index targets long documents.

        Args:
            documents: Source documents to feed into the pipeline.

        Returns:
            A configured ``IngestionPipeline`` ready to run.
        """
        settings = self._base_setting
        steps: List[TransformComponent] = [CleanTextTransform()]

        if settings.text_split_strategy == 'semantic':
            # Semantic splitting: chunk boundaries come from embedding similarity.
            splitter = SemanticSplitterNodeParser(
                embed_model=get_embedding(),
                sentence_splitter=split_by_phrase_regex(),
                buffer_size=7,
                breakpoint_percentile_threshold=95,
            )
        else:
            # Recursive character splitting with the configured separators/sizes.
            splitter = LangchainNodeParser(RecursiveCharacterTextSplitter(
                separators=settings.split_chars,
                chunk_overlap=settings.chunk_overlap,
                chunk_size=settings.chunk_size,
            ))
        steps.append(splitter)

        if settings.index_type == "long_document":
            # Long-document indexes also get previous/self summaries extracted.
            steps.append(SummaryExtractor(summaries=["prev", "self"], llm=get_llm()))

        return IngestionPipeline(
            documents=documents,
            transformations=steps,
        )
