"""
分块策略模块

实现多种前沿的分块策略，包括固定大小、语义、递归和层次分块。
"""

import re
import math
from typing import List, Dict, Optional, Any
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np

from llama_index.core.schema import Document, Node
from llama_index.core.node_parser import SentenceSplitter

from ..config import config, ChunkStrategy
from .metadata import ChunkMetadata


class BaseChunkingStrategy(ABC):
    """
    Base class for chunking strategies.

    Defines the uniform interface that every concrete strategy must
    implement, plus a shared helper for building standardized nodes.
    """

    @abstractmethod
    def chunk(self, documents: List[Document], **kwargs) -> List[Node]:
        """
        Split the given documents into chunks.

        Args:
            documents: Documents to be chunked.
            **kwargs: Extra strategy-specific parameters.

        Returns:
            List[Node]: The resulting chunk nodes.
        """
        pass

    @abstractmethod
    def get_strategy_info(self) -> Dict[str, Any]:
        """
        Describe this strategy.

        Returns:
            Dict[str, Any]: Strategy configuration and parameter info.
        """
        pass

    def _create_node(
        self,
        text: str,
        doc_id: str,
        chunk_index: int,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Node:
        """
        Create a node with standardized chunking metadata.

        Args:
            text: Text content of the node.
            doc_id: Identifier of the source document.
            chunk_index: Index of this chunk within the document.
            metadata: Optional extra metadata merged into the node.

        Returns:
            Node: The created node.
        """
        # Copy the caller's dict so we never mutate their argument in
        # place (the previous implementation updated `metadata` directly).
        node_metadata = dict(metadata) if metadata else {}
        node_metadata.update({
            # e.g. "SemanticChunkingStrategy" -> "semantic"
            "chunking_strategy": self.__class__.__name__.replace("ChunkingStrategy", "").lower(),
            "chunk_index": chunk_index,
            "source_doc_id": doc_id
        })

        return Node(
            text=text,
            metadata=node_metadata
        )


class FixedSizeChunkingStrategy(BaseChunkingStrategy):
    """
    Fixed-size chunking strategy.

    Splits documents into chunks of a fixed target character count;
    suitable for documents without clear structure.
    """

    def __init__(self, chunk_size: int = 1024, chunk_overlap: int = 100):
        """
        Initialize the fixed-size strategy.

        Args:
            chunk_size: Target size of each chunk (characters).
            chunk_overlap: Number of overlapping characters between chunks.
        """
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

    def chunk(self, documents: List[Document], **kwargs) -> List[Node]:
        """Split each document into fixed-size chunks and tag metadata."""
        splitter = SentenceSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap
        )
        chunks = []
        for doc_idx, doc in enumerate(documents):
            doc_chunks = splitter.get_nodes_from_documents([doc])
            # Attach the standard metadata to every chunk.
            for i, chunk in enumerate(doc_chunks):
                chunk.metadata.update({
                    "chunking_strategy": "fixed_size",
                    "source_doc_id": f"doc_{doc_idx}",
                    # chunk_index was missing here, unlike every other
                    # strategy in this module — added for consistency.
                    "chunk_index": i,
                    "chunk_size": len(chunk.text)
                })
                chunks.append(chunk)
        return chunks

    def get_strategy_info(self) -> Dict[str, Any]:
        """Return this strategy's configuration."""
        return {
            "strategy": "fixed_size",
            "chunk_size": self.chunk_size,
            "chunk_overlap": self.chunk_overlap,
            "description": "固定大小分块 - 按指定字符数均匀分割"
        }


class SemanticChunkingStrategy(BaseChunkingStrategy):
    """
    Semantic chunking strategy.

    Splits documents along semantic boundaries so each chunk stays
    topically coherent: sentence-to-sentence similarity is measured over
    a sliding window and cuts are placed where similarity drops.
    """

    # Words ignored when comparing sentences (mixed Chinese/English).
    _STOPWORDS = frozenset({
        '的', '是', '在', '了', '和', '有', '我', '你', '他', '她', '它',
        'the', 'is', 'at', 'which', 'on', 'a', 'an', 'and', 'or', 'but'
    })

    def __init__(
        self,
        threshold: float = 0.7,
        window_size: int = 3,
        min_chunk_size: int = 50,
        max_chunk_size: int = 2048
    ):
        """
        Initialize the semantic strategy.

        Args:
            threshold: Cut when the average similarity falls below this value.
            window_size: Number of following sentences compared at each position.
            min_chunk_size: Minimum characters accumulated before a cut is allowed.
            max_chunk_size: Force a cut once a chunk reaches this many characters.
        """
        self.threshold = threshold
        self.window_size = window_size
        self.min_chunk_size = min_chunk_size
        self.max_chunk_size = max_chunk_size

    def _calculate_semantic_similarity(self, text1: str, text2: str) -> float:
        """
        Jaccard similarity of the two texts' non-stopword vocabularies.

        A lightweight stand-in for embedding-based similarity; swap in a
        model-backed implementation for production use.

        Args:
            text1: First text.
            text2: Second text.

        Returns:
            float: Similarity score in [0, 1].
        """
        vocab_a = set(text1.lower().split()) - self._STOPWORDS
        vocab_b = set(text2.lower().split()) - self._STOPWORDS

        if not vocab_a or not vocab_b:
            return 0.0

        shared = vocab_a & vocab_b
        combined = vocab_a | vocab_b
        return len(shared) / len(combined) if combined else 0.0

    def _find_semantic_boundaries(self, sentences: List[str]) -> List[int]:
        """
        Compute cut positions between sentences.

        Args:
            sentences: Sentence list for one document.

        Returns:
            List[int]: Boundary indices starting at 0 and ending at
            len(sentences); each consecutive pair delimits one chunk.
        """
        total = len(sentences)
        if total < 2:
            return [0, total]

        cuts = [0]       # always start a chunk at the first sentence
        accumulated = 0  # characters in the chunk currently being built

        for pos in range(1, total):
            accumulated += len(sentences[pos - 1])

            # The chunk must reach the minimum size before it may be cut.
            if accumulated < self.min_chunk_size:
                continue

            # Average similarity of the previous sentence against a
            # look-ahead window of upcoming sentences.
            window_stop = min(pos + self.window_size, total)
            scores = [
                self._calculate_semantic_similarity(sentences[pos - 1], sentences[k])
                for k in range(pos, window_stop)
            ]
            mean_score = np.mean(scores) if scores else 0.0

            # Cut on a topical drop, or force a cut at the size ceiling.
            if mean_score < self.threshold or accumulated >= self.max_chunk_size:
                cuts.append(pos)
                accumulated = 0

        cuts.append(total)  # closing boundary
        return cuts

    def chunk(self, documents: List[Document], **kwargs) -> List[Node]:
        """Split each document along its detected semantic boundaries."""
        nodes = []

        for doc_idx, document in enumerate(documents):
            sentences = self._split_into_sentences(document.text)
            if not sentences:
                continue

            boundaries = self._find_semantic_boundaries(sentences)

            # Each adjacent boundary pair delimits one chunk.
            for piece_idx, (lo, hi) in enumerate(zip(boundaries, boundaries[1:])):
                piece = " ".join(sentences[lo:hi])
                if not piece.strip():
                    continue
                nodes.append(self._create_node(
                    text=piece,
                    doc_id=f"doc_{doc_idx}",
                    chunk_index=piece_idx,
                    metadata={
                        "sentence_range": (lo, hi),
                        "chunk_size": len(piece),
                        "semantic_threshold": self.threshold
                    }
                ))

        return nodes

    def _split_into_sentences(self, text: str) -> List[str]:
        """Split text on common Latin and CJK sentence punctuation."""
        pieces = re.split(r'[.!?。！？;；]+', text)
        return [piece.strip() for piece in pieces if piece.strip()]

    def get_strategy_info(self) -> Dict[str, Any]:
        """Return this strategy's configuration."""
        return {
            "strategy": "semantic",
            "threshold": self.threshold,
            "window_size": self.window_size,
            "min_chunk_size": self.min_chunk_size,
            "max_chunk_size": self.max_chunk_size,
            "description": "语义分块 - 基于内容语义边界分割"
        }


class RecursiveChunkingStrategy(BaseChunkingStrategy):
    """
    Recursive chunking strategy.

    Splits documents along a prioritized list of separators, preserving
    document structure; suited to documents with explicit structural
    markers (Markdown, HTML, ...).
    """

    def __init__(
        self,
        chunk_size: int = 1024,
        chunk_overlap: int = 100,
        separators: Optional[List[str]] = None
    ):
        """
        Initialize the recursive strategy.

        Args:
            chunk_size: Target size of each chunk.
            chunk_overlap: Overlap between adjacent chunks.
            separators: Separators in priority order; defaults to
                paragraph breaks plus CJK/Latin sentence enders.
        """
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.separators = separators or ["\n\n", "\n", "。", "！", "？", ".", "!", "?"]

    def chunk(self, documents: List[Document], **kwargs) -> List[Node]:
        """Split the documents and tag every node with recursive-split metadata."""
        # NOTE(review): `self.separators` is only recorded in metadata below;
        # the SentenceSplitter is configured with size/overlap alone — confirm
        # whether the separators are meant to drive the actual splitting.
        splitter = SentenceSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap
        )

        nodes = []
        for doc_idx, document in enumerate(documents):
            for node_idx, node in enumerate(splitter.get_nodes_from_documents([document])):
                node.metadata.update({
                    "chunking_strategy": "recursive",
                    "source_doc_id": f"doc_{doc_idx}",
                    "chunk_index": node_idx,
                    "separators": self.separators,
                    "chunk_size": len(node.text)
                })
                nodes.append(node)

        return nodes

    def get_strategy_info(self) -> Dict[str, Any]:
        """Return this strategy's configuration."""
        return {
            "strategy": "recursive",
            "chunk_size": self.chunk_size,
            "chunk_overlap": self.chunk_overlap,
            "separators": self.separators,
            "description": "递归分块 - 按分隔符递归分割"
        }


class HierarchicalChunkingStrategy(BaseChunkingStrategy):
    """
    Hierarchical chunking strategy.

    Produces a two-level hierarchy: large parent chunks plus smaller
    child chunks linked back to their parent. Suited to complex
    documents that need multi-granularity retrieval.
    """

    def __init__(
        self,
        parent_chunk_size: int = 2048,
        child_chunk_size: int = 512,
        overlap: int = 50
    ):
        """
        Initialize the hierarchical strategy.

        Args:
            parent_chunk_size: Target size of parent-level chunks.
            child_chunk_size: Target size of child-level chunks.
            overlap: Overlap applied at both levels.
        """
        self.parent_chunk_size = parent_chunk_size
        self.child_chunk_size = child_chunk_size
        self.overlap = overlap

    def chunk(self, documents: List[Document], **kwargs) -> List[Node]:
        """Build parent and child chunks; children precede their parent in the output."""
        # Both splitters are loop-invariant: build them once up front
        # instead of re-creating the child splitter for every parent
        # chunk (as the previous implementation did).
        parent_splitter = SentenceSplitter(
            chunk_size=self.parent_chunk_size,
            chunk_overlap=self.overlap
        )
        child_splitter = SentenceSplitter(
            chunk_size=self.child_chunk_size,
            chunk_overlap=self.overlap
        )

        chunks = []
        for doc_idx, doc in enumerate(documents):
            parent_chunks = parent_splitter.get_nodes_from_documents([doc])

            for parent_idx, parent_chunk in enumerate(parent_chunks):
                parent_id = f"doc_{doc_idx}_parent_chunk_{parent_idx}"

                # Re-split the parent's text via a temporary document so
                # children inherit the parent's metadata.
                temp_doc = Document(text=parent_chunk.text, metadata=parent_chunk.metadata)
                child_chunks = child_splitter.get_nodes_from_documents([temp_doc])

                child_ids = []
                for child_idx, child_chunk in enumerate(child_chunks):
                    child_id = f"{parent_id}_child_chunk_{child_idx}"
                    child_ids.append(child_id)

                    # Tag the child and link it to its parent.
                    child_chunk.metadata.update({
                        "chunking_strategy": "hierarchical",
                        "hierarchy_level": "child",
                        "parent_chunk_id": parent_id,
                        "chunk_index": child_idx,
                        "source_doc_id": f"doc_{doc_idx}",
                        "chunk_size": len(child_chunk.text)
                    })
                    chunks.append(child_chunk)

                # Link the parent back to all of its children.
                parent_chunk.metadata.update({
                    "chunking_strategy": "hierarchical",
                    "hierarchy_level": "parent",
                    "child_chunk_ids": child_ids,
                    "chunk_index": parent_idx,
                    "source_doc_id": f"doc_{doc_idx}",
                    "chunk_size": len(parent_chunk.text)
                })
                chunks.append(parent_chunk)

        return chunks

    def get_strategy_info(self) -> Dict[str, Any]:
        """Return this strategy's configuration."""
        return {
            "strategy": "hierarchical",
            "parent_chunk_size": self.parent_chunk_size,
            "child_chunk_size": self.child_chunk_size,
            "overlap": self.overlap,
            "description": "层次分块 - 多级层次结构分块"
        }


class FileTypeAwareStrategy(BaseChunkingStrategy):
    """
    File-type-aware chunking strategy.

    Delegates each document to the chunking strategy best suited to its
    file type (PDF, Markdown, source code, or a semantic default).
    """

    # Extensions treated as source code.
    _CODE_SUFFIXES = ('.py', '.js', '.java', '.cpp', '.c', '.cs')

    def __init__(self):
        """Build the default file-type -> strategy mapping."""
        self.strategies = {}
        self._setup_default_strategies()

    def _setup_default_strategies(self):
        """Install the default strategy for each supported file type."""
        self.strategies = {
            'pdf': RecursiveChunkingStrategy(
                chunk_size=config.chunk_size,
                chunk_overlap=config.chunk_overlap,
                separators=["\n\n", "\n", " ", ""]
            ),
            'markdown': RecursiveChunkingStrategy(
                chunk_size=config.chunk_size,
                chunk_overlap=config.chunk_overlap,
                separators=["```", "\n## ", "\n# ", "\n\n", "\n"]
            ),
            'code': FixedSizeChunkingStrategy(
                chunk_size=800,  # code benefits from smaller chunks
                chunk_overlap=50
            ),
            'default': SemanticChunkingStrategy(
                threshold=config.semantic_chunk_threshold,
                window_size=config.semantic_chunk_similarity_window_size
            )
        }

    def chunk(self, documents: List[Document], **kwargs) -> List[Node]:
        """Chunk each document with the strategy matched to its file type."""
        all_nodes = []

        for document in documents:
            file_type = self._detect_file_type(document.metadata)
            chosen = self.strategies.get(file_type, self.strategies['default'])

            # Tag every produced node with the detected file type.
            for node in chosen.chunk([document]):
                node.metadata['file_type'] = file_type
                all_nodes.append(node)

        return all_nodes

    def _detect_file_type(self, metadata: Dict[str, Any]) -> str:
        """Classify a document by its file extension; 'default' when unknown."""
        name = metadata.get('file_name', '').lower()

        if name.endswith('.pdf'):
            return 'pdf'
        if name.endswith(('.md', '.markdown')):
            return 'markdown'
        if name.endswith(self._CODE_SUFFIXES):
            return 'code'
        return 'default'

    def get_strategy_info(self) -> Dict[str, Any]:
        """Return this strategy's configuration, including per-type sub-strategies."""
        return {
            "strategy": "file_type_aware",
            "file_type_strategies": {
                ft: strategy.get_strategy_info()
                for ft, strategy in self.strategies.items()
            },
            "description": "文件类型感知分块 - 根据文件类型选择最适合的策略"
        }