from llama_index.core import Document
from embedding_model import EmbeddingClient
import re
from typing import Dict, List, Optional
from llama_index.core.node_parser import (
    HierarchicalNodeParser,
    TokenTextSplitter,
    SentenceSplitter,
    SemanticSplitterNodeParser
)
from llama_index.core.ingestion import IngestionPipeline, IngestionCache
from llama_index.core.schema import TransformComponent
from pydantic import PrivateAttr,Field


class LegalChunkingPipeline:
    """Hierarchical chunking pipeline for Chinese legal documents.

    Splits documents along the statutory hierarchy
    编 (part) → 章 (chapter) → 节 (section) → 条 (article),
    using a different splitter strategy per level, and attaches the
    detected hierarchy as node metadata.
    """

    def __init__(
        self,
        # Per-level chunk sizes
        part_chunk_size: int = 4096,    # part (编) level — largest chunks
        chapter_chunk_size: int = 2048,
        section_chunk_size: int = 1024,
        article_chunk_size: int = 512,  # article (条) level — smallest chunks
        # Advanced options
        chunk_overlap: int = 64,
        semantic_threshold: float = 0.85,
        # Embedding model used by the semantic splitter
        embedding_model: "EmbeddingClient" = None,
    ):
        """Initialize the legal-document chunking pipeline.

        Args:
            part_chunk_size: Chunk size for the part (编) level.
            chapter_chunk_size: Chunk size for the chapter (章) level.
            section_chunk_size: Chunk size for the section (节) level.
            article_chunk_size: Chunk size for the article (条) level.
            chunk_overlap: Overlap between adjacent chunks; the article
                level uses half of this value.
            semantic_threshold: Breakpoint percentile in [0, 1] for the
                semantic splitter (multiplied by 100 downstream).
            embedding_model: Client providing embeddings for semantic
                chunking. Required.

        Raises:
            ValueError: If ``embedding_model`` is not provided.
        """
        if not embedding_model:
            raise ValueError("embedding_model is None")
        self.embed_model = embedding_model

        # Regexes detecting Chinese statutory headings at line start.
        self.title_patterns = {
            "part": r"^第[一二三四五六七八九十百]+编",
            "chapter": r"^第[一二三四五六七八九十百]+章",
            "section": r"^第[一二三四五六七八九十百]+节",
            "article": r"^第[一二三四五六七八九十百]+条"
        }

        # Build the hierarchical parser for the full 4-level structure.
        self.parser = self._build_dynamic_parser(
            part_chunk_size,
            chapter_chunk_size,
            section_chunk_size,
            article_chunk_size,
            chunk_overlap,
            semantic_threshold
        )

        # Snapshot the full default configuration so per-document
        # adjustments made in _adjust_parser_for_structure can be undone.
        # (Bug fix: previously those adjustments were cumulative, so one
        # document without a 节 level permanently downgraded the parser
        # for every later document.)
        self._full_parser_ids = ["part", "chapter", "section", "article"]
        self._full_chunk_sizes = [
            part_chunk_size, chapter_chunk_size, section_chunk_size, article_chunk_size
        ]
        self._full_section_parser = self.parser.node_parser_map["section"]

    def _build_dynamic_parser(
        self,
        part_size: int,
        chapter_size: int,
        section_size: int,
        article_size: int,
        overlap: int,
        semantic_threshold: float
    ) -> "HierarchicalNodeParser":
        """Build the 4-level hierarchical node parser.

        Each level gets a splitter suited to its typical content size:
        token-based for parts, semantic for chapters/sections, and
        sentence-based for articles.
        """
        # 1. Part (编) level — token splitting (large content volume).
        part_parser = TokenTextSplitter(
            chunk_size=part_size,
            chunk_overlap=overlap,
            separator=self.title_patterns["part"]
        )

        # 2. Chapter (章) level — semantic splitting (keeps topics intact).
        chapter_parser = SemanticSplitterNodeParser(
            buffer_size=2,
            breakpoint_percentile_threshold=int(semantic_threshold * 100),
            embed_model=self.embed_model,
            include_metadata=True
        )

        # 3. Section (节) level — may not exist in every document;
        # fall back to the chapter splitter.
        section_parser = chapter_parser

        # 4. Article (条) level — sentence splitting (concise content).
        article_parser = SentenceSplitter(
            chunk_size=article_size,
            chunk_overlap=overlap // 2,  # smaller overlap at the finest level
            paragraph_separator=self.title_patterns["article"]
        )

        return HierarchicalNodeParser(
            chunk_sizes=[part_size, chapter_size, section_size, article_size],
            node_parser_map={
                "part": part_parser,
                "chapter": chapter_parser,
                "section": section_parser,
                "article": article_parser
            },
            node_parser_ids=["part", "chapter", "section", "article"]
        )

    def _detect_structure(self, text: str) -> dict:
        """Detect which hierarchy levels are present in *text*.

        Returns a dict with:
        - ``has_section``: True if any 节 heading is found.
        - ``max_depth``: deepest level present (1=part … 4=article).
        """
        structure = {
            "has_section": False,
            "max_depth": 1  # default: only the part (编) level
        }

        levels = list(self.title_patterns.keys())
        for level, pattern in self.title_patterns.items():
            # MULTILINE so ^ matches each heading at its own line start.
            if re.search(pattern, text, re.MULTILINE):
                structure["max_depth"] = max(
                    structure["max_depth"], levels.index(level) + 1
                )
                if level == "section":
                    structure["has_section"] = True

        return structure

    def _adjust_parser_for_structure(self, structure: dict):
        """Adjust the shared parser to match one document's structure.

        The full default configuration is restored first so adjustments
        made for a previous document never leak into the current one.
        """
        # Reset to the full 4-level configuration (bug fix: adjustments
        # used to accumulate across documents).
        self.parser.node_parser_map["section"] = self._full_section_parser
        self.parser.chunk_sizes = list(self._full_chunk_sizes)
        self.parser.node_parser_ids = list(self._full_parser_ids)

        # If the 节 (section) level is absent, reuse the chapter parser
        # and chapter chunk size for that slot.
        if not structure["has_section"]:
            self.parser.node_parser_map["section"] = self.parser.node_parser_map["chapter"]
            self.parser.chunk_sizes[2] = self.parser.chunk_sizes[1]

        # Trim the hierarchy to the deepest level actually present.
        self.parser.node_parser_ids = list(self.title_patterns.keys())[:structure["max_depth"]]

    def _extract_hierarchy_metadata(self, text: str) -> dict:
        """Extract hierarchy headings found at the start of *text*.

        Returns a dict mapping every level name to its matched heading
        (e.g. ``"第一章"``) or ``None`` when absent.
        """
        metadata = {}
        levels = list(self.title_patterns.keys())
        for level, pattern in self.title_patterns.items():
            match = re.search(pattern, text)
            if match:
                metadata[level] = match.group(0).strip()
                # A match at this level resets all deeper levels.
                for lower_level in levels[levels.index(level) + 1:]:
                    metadata[lower_level] = None
            elif level not in metadata:
                metadata[level] = None
        return metadata

    def chunk_documents(self, documents: "List[Document]") -> "List[Document]":
        """Chunk *documents* hierarchically and tag nodes with metadata.

        For each document the structure is detected, the parser adjusted,
        and every resulting node annotated with its hierarchy metadata,
        primary level, and ``structure_type``.
        """
        all_nodes = []

        for doc in documents:
            # Detect the document's hierarchy.
            structure = self._detect_structure(doc.text)

            # Adjust the parser for this specific document.
            self._adjust_parser_for_structure(structure)

            # Run the hierarchical split.
            nodes = self.parser.get_nodes_from_documents([doc])

            # Attach hierarchy metadata to every node (best effort: a
            # failing node is skipped, not fatal).
            for node in nodes:
                try:
                    # 1. Extract hierarchy headings from the node prefix.
                    hierarchy_meta = self._extract_hierarchy_metadata(node.text[:200])

                    # 2. Determine the primary (deepest matched) level.
                    primary_level = None
                    for level in reversed(self.title_patterns.keys()):
                        if hierarchy_meta.get(level):
                            primary_level = level
                            break

                    # 3. Update node metadata.
                    node.metadata.update({
                        "document_hierarchy": hierarchy_meta,
                        "primary_level": primary_level,
                        "structure_type": "legal"
                    })
                    all_nodes.append(node)

                except Exception as e:
                    print(f"处理节点时出错: {str(e)}")
                    continue

        return all_nodes

class DataChunkPipeline(TransformComponent):
    """Ingestion-pipeline transform that chunks legal documents.

    Wraps :class:`LegalChunkingPipeline` so it can be plugged into a
    llama_index ``IngestionPipeline`` as a ``TransformComponent``.
    """

    # Underlying chunking pipeline, kept as a pydantic private attribute
    # so it is excluded from the TransformComponent model fields.
    _legal_chunk_pipeline: Optional["LegalChunkingPipeline"] = PrivateAttr(default=None)

    def __init__(
        self,
        # Per-level chunk sizes
        part_chunk_size: int = 4096,    # part (编) level — largest chunks
        chapter_chunk_size: int = 2048,
        section_chunk_size: int = 1024,
        article_chunk_size: int = 512,  # article (条) level — smallest chunks
        # Advanced options
        chunk_overlap: int = 64,
        semantic_threshold: float = 0.85,
        # Embedding model (required)
        embedding_model: "EmbeddingClient" = None,
    ):
        """Initialize the transform.

        All parameters mirror :class:`LegalChunkingPipeline`.

        Raises:
            ValueError: If ``embedding_model`` is not provided.
        """
        super().__init__()
        if not embedding_model:
            raise ValueError("embedding_model is None")
        # Bug fix: forward the full chunking configuration; previously all
        # size/overlap/threshold arguments were accepted but silently
        # ignored, and only embedding_model was passed through.
        self._legal_chunk_pipeline = LegalChunkingPipeline(
            part_chunk_size=part_chunk_size,
            chapter_chunk_size=chapter_chunk_size,
            section_chunk_size=section_chunk_size,
            article_chunk_size=article_chunk_size,
            chunk_overlap=chunk_overlap,
            semantic_threshold=semantic_threshold,
            embedding_model=embedding_model,
        )

    def __call__(self, nodes, **kwargs):
        """Chunk the incoming documents and return the resulting nodes."""
        return self._legal_chunk_pipeline.chunk_documents(nodes)

