"""
高级文档分块器

统一的分块接口，整合多种分块策略和质量分析功能。
"""

from typing import List, Dict, Optional, Any
from concurrent.futures import ThreadPoolExecutor, as_completed

from llama_index.core.schema import Document, Node

from ..config import config, ChunkStrategy
from .strategies import (
    BaseChunkingStrategy,
    FixedSizeChunkingStrategy,
    SemanticChunkingStrategy,
    RecursiveChunkingStrategy,
    HierarchicalChunkingStrategy,
    FileTypeAwareStrategy
)
from .quality import ChunkingQualityAnalyzer


class AdvancedDocumentChunker:
    """Advanced document chunker.

    Integrates several chunking strategies behind a single interface and
    layers quality analysis on top of the raw chunking results.

    Features:
    - Multiple chunking strategies (fixed-size, semantic, recursive,
      hierarchical, file-type aware)
    - Concurrent chunking across document batches
    - Chunk quality analysis and evaluation
    - Detailed statistics and improvement suggestions
    - File-type specific optimizations
    """

    def __init__(self, strategy: Optional[ChunkStrategy] = None):
        """Initialize the advanced document chunker.

        Args:
            strategy: Default chunking strategy; falls back to
                ``config.chunk_strategy`` when omitted.
        """
        self.strategy = strategy or config.chunk_strategy
        self.quality_analyzer = ChunkingQualityAnalyzer()
        self.strategies = self._initialize_strategies()

    def _initialize_strategies(self) -> Dict[ChunkStrategy, BaseChunkingStrategy]:
        """Build the registry of chunking strategies from global config.

        NOTE: ``FileTypeAwareStrategy`` is not registered here; it is
        exposed separately through :meth:`chunk_by_file_type`.
        """
        return {
            ChunkStrategy.FIXED: FixedSizeChunkingStrategy(
                chunk_size=config.chunk_size,
                chunk_overlap=config.chunk_overlap
            ),
            ChunkStrategy.SEMANTIC: SemanticChunkingStrategy(
                threshold=config.semantic_chunk_threshold,
                window_size=config.semantic_chunk_similarity_window_size,
                min_chunk_size=config.min_chunk_size,
                max_chunk_size=config.max_chunk_size
            ),
            ChunkStrategy.RECURSIVE: RecursiveChunkingStrategy(
                chunk_size=config.chunk_size,
                chunk_overlap=config.chunk_overlap
            ),
            ChunkStrategy.HIERARCHICAL: HierarchicalChunkingStrategy()
        }

    def _failure_result(self, message: str) -> Dict[str, Any]:
        """Build the uniform failure payload used by ``chunk_documents``."""
        return {
            "chunks": [],
            "statistics": self._empty_statistics(),
            "quality_analysis": None,
            "success": False,
            "message": message
        }

    def chunk_documents(
        self,
        documents: List[Document],
        max_workers: Optional[int] = None,
        strategy: Optional[ChunkStrategy] = None,
        analyze_quality: bool = True
    ) -> Dict[str, Any]:
        """Chunk documents using the requested strategy.

        Args:
            documents: Source documents to split.
            max_workers: Maximum worker threads; defaults to
                ``config.max_concurrent_chunking``.
            strategy: Per-call strategy override of the instance default.
            analyze_quality: Whether to run the quality analyzer.

        Returns:
            Dict[str, Any]: Report containing ``chunks``, ``statistics``,
            optional ``quality_analysis``, a ``success`` flag and a
            human-readable ``message``. Errors are reported through this
            dict rather than raised.
        """
        if not documents:
            return self._failure_result("没有文档需要处理")

        chunk_strategy = strategy or self.strategy
        # Guard against strategies that were never registered (previously a
        # raw KeyError escaped here); fail gracefully like the other error
        # paths of this method do.
        chunking_strategy = self.strategies.get(chunk_strategy)
        if chunking_strategy is None:
            return self._failure_result(f"不支持的策略: {chunk_strategy}")

        max_workers = max_workers or config.max_concurrent_chunking

        print(f"正在使用 {chunk_strategy.value} 分块策略处理 {len(documents)} 个文档...")
        print(f"策略配置: {chunking_strategy.get_strategy_info()}")

        try:
            chunks = self._perform_chunking(
                documents, chunking_strategy, max_workers
            )

            print(f"分块完成，生成 {len(chunks)} 个文档片段")

            statistics = self._generate_statistics(documents, chunks, chunking_strategy)

            quality_analysis = None
            if analyze_quality:
                print("正在进行质量分析...")
                quality_analysis = self.quality_analyzer.analyze_chunking_quality(
                    documents, chunks
                )

            return {
                "chunks": chunks,
                "statistics": statistics,
                "quality_analysis": quality_analysis,
                "success": True,
                "message": f"成功生成 {len(chunks)} 个分块"
            }

        except Exception as e:
            # Best-effort contract: surface the error in the report instead
            # of raising, consistent with the empty-input path above.
            print(f"分块处理失败: {e}")
            return self._failure_result(f"分块失败: {str(e)}")

    def _perform_chunking(
        self,
        documents: List[Document],
        strategy: BaseChunkingStrategy,
        max_workers: int
    ) -> List[Node]:
        """Dispatch to concurrent or single-threaded chunking."""
        # Concurrency only pays off with multiple documents AND workers.
        if len(documents) > 1 and max_workers > 1:
            return self._chunk_concurrently(documents, strategy, max_workers)
        else:
            return strategy.chunk(documents)

    def _chunk_concurrently(
        self,
        documents: List[Document],
        strategy: BaseChunkingStrategy,
        max_workers: int
    ) -> List[Node]:
        """Chunk document batches concurrently.

        A batch whose worker fails is retried once on the calling thread,
        so a single worker error does not silently drop documents.
        """
        chunks: List[Node] = []

        # Split the documents into roughly ``max_workers`` batches.
        batch_size = max(1, len(documents) // max_workers)
        doc_batches = [
            documents[i:i + batch_size]
            for i in range(0, len(documents), batch_size)
        ]

        print(f"使用 {len(doc_batches)} 个批次并发处理...")

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_batch = {
                executor.submit(strategy.chunk, batch): batch_idx
                for batch_idx, batch in enumerate(doc_batches)
            }

            for future in as_completed(future_to_batch):
                batch_idx = future_to_batch[future]
                try:
                    batch_chunks = future.result()
                    chunks.extend(batch_chunks)
                    print(f"批次 {batch_idx + 1} 完成，生成 {len(batch_chunks)} 个分块")
                except Exception as e:
                    print(f"批次 {batch_idx + 1} 处理失败: {e}")
                    # Degrade to single-threaded processing for this batch.
                    batch = doc_batches[batch_idx]
                    try:
                        fallback_chunks = strategy.chunk(batch)
                        chunks.extend(fallback_chunks)
                        print(f"批次 {batch_idx + 1} 降级处理成功")
                    except Exception as fallback_error:
                        print(f"批次 {batch_idx + 1} 降级处理也失败: {fallback_error}")

        return chunks

    def chunk_by_file_type(
        self,
        documents: List[Document],
        analyze_quality: bool = True
    ) -> Dict[str, Any]:
        """Chunk documents with the file-type aware strategy.

        Args:
            documents: Documents to split.
            analyze_quality: Whether to run the quality analyzer.

        Returns:
            Dict[str, Any]: Chunking report with chunks additionally
            grouped by their ``file_type`` metadata.
        """
        print("使用文件类型感知策略进行分块...")

        strategy = FileTypeAwareStrategy()
        chunks = strategy.chunk(documents)

        # Group chunks by file type.
        file_type_chunks: Dict[str, List[Node]] = {}
        for chunk in chunks:
            file_type = chunk.metadata.get('file_type', 'unknown')
            file_type_chunks.setdefault(file_type, []).append(chunk)

        statistics = {
            "total_chunks": len(chunks),
            "file_type_distribution": {
                ft: len(chunks_list) for ft, chunks_list in file_type_chunks.items()
            },
            "strategy_info": strategy.get_strategy_info()
        }

        quality_analysis = None
        if analyze_quality:
            quality_analysis = self.quality_analyzer.analyze_chunking_quality(
                documents, chunks
            )

        return {
            "chunks": chunks,
            "file_type_chunks": file_type_chunks,
            "statistics": statistics,
            "quality_analysis": quality_analysis,
            "success": True,
            "message": f"文件类型分块完成，共 {len(chunks)} 个分块"
        }

    @staticmethod
    def _aggregate_by_metadata(
        chunks: List[Node],
        key: str,
        default: str
    ) -> Dict[str, Dict[str, int]]:
        """Aggregate chunk count and total text length by a metadata field."""
        stats: Dict[str, Dict[str, int]] = {}
        for chunk in chunks:
            entry = stats.setdefault(
                chunk.metadata.get(key, default),
                {'count': 0, 'total_length': 0}
            )
            entry['count'] += 1
            entry['total_length'] += len(chunk.text)
        return stats

    def _generate_statistics(
        self,
        original_docs: List[Document],
        chunks: List[Node],
        strategy: BaseChunkingStrategy
    ) -> Dict[str, Any]:
        """Produce the detailed statistics section of the chunking report."""
        # Imported lazily so numpy is only required once statistics run.
        import numpy as np

        total_original_length = sum(len(doc.text) for doc in original_docs)
        total_chunk_length = sum(len(chunk.text) for chunk in chunks)

        # Distributions by producing strategy and by source file type.
        strategy_stats = self._aggregate_by_metadata(
            chunks, 'chunking_strategy', 'unknown'
        )
        file_type_stats = self._aggregate_by_metadata(
            chunks, 'file_type', 'unknown'
        )

        # Chunk size distribution.
        chunk_sizes = [len(chunk.text) for chunk in chunks]
        size_stats = {
            'min_size': min(chunk_sizes) if chunk_sizes else 0,
            'max_size': max(chunk_sizes) if chunk_sizes else 0,
            'avg_size': float(np.mean(chunk_sizes)) if chunk_sizes else 0,
            'median_size': float(np.median(chunk_sizes)) if chunk_sizes else 0,
            'std_dev': float(np.std(chunk_sizes)) if chunk_sizes else 0
        }

        # Hierarchy level distribution (chunk counts only).
        hierarchy_stats: Dict[str, int] = {}
        for chunk in chunks:
            level = chunk.metadata.get('hierarchy_level', 'standard')
            hierarchy_stats[level] = hierarchy_stats.get(level, 0) + 1

        return {
            "original_documents": len(original_docs),
            "total_chunks": len(chunks),
            "original_total_chars": total_original_length,
            "chunk_total_chars": total_chunk_length,
            # Ratio of chunked text to original text; can exceed 1 when
            # chunks overlap and duplicate content.
            "compression_ratio": round(total_chunk_length / total_original_length, 2) if total_original_length > 0 else 0,
            "strategy_distribution": strategy_stats,
            "file_type_distribution": file_type_stats,
            "chunk_size_stats": size_stats,
            "hierarchy_distribution": hierarchy_stats,
            "strategy_info": strategy.get_strategy_info()
        }

    def _empty_statistics(self) -> Dict[str, Any]:
        """Return the zeroed statistics payload for empty/failed runs."""
        return {
            "original_documents": 0,
            "total_chunks": 0,
            "original_total_chars": 0,
            "chunk_total_chars": 0,
            "compression_ratio": 0,
            "strategy_distribution": {},
            "file_type_distribution": {},
            "chunk_size_stats": {
                'min_size': 0,
                'max_size': 0,
                'avg_size': 0,
                'median_size': 0,
                'std_dev': 0
            },
            "hierarchy_distribution": {},
            "strategy_info": {}
        }

    def get_supported_strategies(self) -> Dict[ChunkStrategy, Dict[str, Any]]:
        """Return info for every registered strategy, keyed by enum member."""
        return {
            strategy: chunking_strategy.get_strategy_info()
            for strategy, chunking_strategy in self.strategies.items()
        }

    def set_strategy(self, strategy: ChunkStrategy) -> None:
        """Set the default chunking strategy.

        Args:
            strategy: The new default strategy.

        Raises:
            ValueError: If the strategy is not registered.
        """
        if strategy not in self.strategies:
            raise ValueError(f"不支持的策略: {strategy}")

        old_strategy = self.strategy
        self.strategy = strategy
        print(f"分块策略已从 {old_strategy.value} 更改为 {strategy.value}")

    def validate_chunking_result(
        self,
        chunks: List[Node],
        strict: bool = False
    ) -> Dict[str, Any]:
        """Validate a list of chunks.

        Args:
            chunks: Chunks to validate.
            strict: Also enforce the configured min/max chunk sizes.

        Returns:
            Dict[str, Any]: ``valid`` flag plus ``issues`` (hard errors)
            and ``warnings`` (soft problems); only issues make the result
            invalid.
        """
        if not chunks:
            return {
                "valid": False,
                "issues": ["分块列表为空"],
                "warnings": []
            }

        issues = []
        warnings = []

        # Basic per-chunk requirements: non-empty text and metadata keys.
        for i, chunk in enumerate(chunks):
            chunk_id = chunk.metadata.get("source_doc_id", f"chunk_{i}")

            if not chunk.text.strip():
                issues.append(f"分块 {chunk_id} 为空")

            if "chunk_index" not in chunk.metadata:
                warnings.append(f"分块 {chunk_id} 缺少索引信息")

            if "source_doc_id" not in chunk.metadata:
                warnings.append(f"分块 {chunk_id} 缺少源文档信息")

        # Hierarchical consistency check (only when both levels appear).
        parent_chunks = [
            chunk for chunk in chunks
            if chunk.metadata.get("hierarchy_level") == "parent"
        ]
        child_chunks = [
            chunk for chunk in chunks
            if chunk.metadata.get("hierarchy_level") == "child"
        ]

        if parent_chunks and child_chunks:
            # Every child's parent_chunk_id should resolve to a parent.
            parent_ids = {chunk.metadata.get("source_doc_id") for chunk in parent_chunks}
            child_parent_ids = {
                chunk.metadata.get("parent_chunk_id") for chunk in child_chunks
                if chunk.metadata.get("parent_chunk_id")
            }

            missing_parents = child_parent_ids - parent_ids
            if missing_parents:
                warnings.append(f"发现 {len(missing_parents)} 个子级分块引用了不存在的父级分块")

        # Strict mode additionally enforces configured size bounds.
        if strict:
            for i, chunk in enumerate(chunks):
                chunk_size = len(chunk.text)
                if chunk_size < config.min_chunk_size:
                    issues.append(f"分块 {i} 过小: {chunk_size} < {config.min_chunk_size}")
                elif chunk_size > config.max_chunk_size:
                    issues.append(f"分块 {i} 过大: {chunk_size} > {config.max_chunk_size}")

        is_valid = len(issues) == 0

        return {
            "valid": is_valid,
            "issues": issues,
            "warnings": warnings,
            "summary": f"验证完成: {len(chunks)} 个分块，{len(issues)} 个错误，{len(warnings)} 个警告"
        }

    def get_chunker_info(self) -> Dict[str, Any]:
        """Return the chunker's current configuration as a dict."""
        return {
            "current_strategy": self.strategy.value,
            "available_strategies": list(self.strategies.keys()),
            "strategy_info": self.strategies[self.strategy].get_strategy_info(),
            "concurrent_processing": config.max_concurrent_chunking,
            "chunk_size_range": f"{config.min_chunk_size}-{config.max_chunk_size}",
            "chunk_overlap": config.chunk_overlap,
            "semantic_threshold": config.semantic_chunk_threshold
        }

    def __str__(self) -> str:
        """Return a human-readable summary of the chunker configuration."""
        info = self.get_chunker_info()
        return f"""
=== 高级文档分块器配置 ===
当前策略: {info['current_strategy']}
策略信息: {info['strategy_info']}
并发处理: {info['concurrent_processing']}
分块大小范围: {info['chunk_size_range']}
重叠大小: {info['chunk_overlap']}
语义阈值: {info['semantic_threshold']}
        """.strip()


# Module-level singleton instance, created at import time.
document_chunker = AdvancedDocumentChunker()