from typing import List

from llama_index.core.schema import Document, BaseNode
from llama_index.core.node_parser import SentenceSplitter

from axiom_boot.di import service

from ..node_parser_interface import NodeParserInterface
from .sentence_splitter_config import SentenceSplitterConfig


class SentenceSplitterParser(NodeParserInterface):
    """
    A concrete parser implementing ``NodeParserInterface`` that internally
    delegates to LlamaIndex's ``SentenceSplitter``.
    """

    def __init__(self, config: SentenceSplitterConfig):
        """
        Build the underlying ``SentenceSplitter`` from *config*.

        Args:
            config: Splitter settings (chunk size/overlap, separators,
                secondary regex, prev/next relationship flag).

        Raises:
            ValueError: If ``chunk_overlap`` is incompatible with
                ``chunk_size`` (re-raised with a clearer message), or for
                any other invalid configuration rejected by the library.
        """
        self._config = config
        try:
            self._parser = SentenceSplitter(
                chunk_size=config.chunk_size,
                chunk_overlap=config.chunk_overlap,
                paragraph_separator=config.paragraph_separator,
                secondary_chunking_regex=config.secondary_chunking_regex,
                include_prev_next_rel=config.include_prev_next_rel,
            )
        except ValueError as e:
            # Catch the library's raw ValueError and re-raise it in clearer
            # business language. This makes 500-error logs more readable
            # while keeping the error type (and thus status code) unchanged.
            if "chunk overlap" in str(e):
                raise ValueError(
                    f"参数配置错误：文本块重叠大小 ({config.chunk_overlap}) "
                    f"不能大于或等于文本块大小 ({config.chunk_size})。"
                ) from e
            # Unrecognized ValueError: bare `raise` re-raises the in-flight
            # exception with its original traceback intact.
            raise

    def parse_nodes(self, documents: List[Document]) -> List[BaseNode]:
        """
        Parse *documents* into nodes using the configured SentenceSplitter.

        Args:
            documents: Source documents to split.

        Returns:
            The nodes produced by ``SentenceSplitter.get_nodes_from_documents``.
        """
        return self._parser.get_nodes_from_documents(documents)