#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
优化的智能文本分割器
==================

本模块提供企业级的文本分割解决方案，支持多种分割策略和质量评估。

核心特性：
1. 多种分割策略：固定长度、句子边界、段落边界、语义感知、混合策略
2. 智能重叠处理：无重叠、固定重叠、智能重叠
3. 质量评估体系：10+个质量指标，实时评估分割效果
4. 灵活配置选项：20+个参数，支持细粒度控制
5. 性能优化：缓存、批处理、并行处理
6. 向后兼容：完全兼容原有split_text接口

作者：AI Assistant
版本：2.0.0
更新：2024年
"""

import re
import time
import hashlib
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, asdict
from enum import Enum
from typing import List, Dict, Any, Optional, Tuple, Union
from pathlib import Path

# 可选依赖检查
try:
    import jieba
    jieba.initialize()
    JIEBA_AVAILABLE = True
    print("✅ jieba中文分词库导入成功")
except ImportError:
    JIEBA_AVAILABLE = False
    print("⚠️  jieba未安装，中文处理功能受限。安装命令: pip install jieba")

try:
    import spacy
    SPACY_AVAILABLE = True
    print("✅ spaCy NLP库导入成功")
except ImportError:
    SPACY_AVAILABLE = False
    print("⚠️  spaCy未安装，高级NLP功能受限。安装命令: pip install spacy")

try:
    import nltk
    NLTK_AVAILABLE = True
    print("✅ NLTK自然语言处理库导入成功")
except ImportError:
    NLTK_AVAILABLE = False
    print("⚠️  NLTK未安装，英文处理功能受限。安装命令: pip install nltk")

# Configure module-level logging: INFO threshold, timestamped records.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


class SplitStrategy(Enum):
    """Available text-splitting strategies."""
    FIXED_LENGTH = "fixed_length"           # fixed-size character windows
    SENTENCE_BOUNDARY = "sentence_boundary" # cut at sentence boundaries
    PARAGRAPH_BOUNDARY = "paragraph_boundary" # cut at paragraph boundaries
    SEMANTIC_AWARE = "semantic_aware"       # structure/semantics-aware cutting
    HYBRID = "hybrid"                       # auto-select among the above


class OverlapStrategy(Enum):
    """How adjacent chunks share overlapping text."""
    NO_OVERLAP = "no_overlap"               # chunks are disjoint
    FIXED_OVERLAP = "fixed_overlap"         # fixed number of trailing characters
    SMART_OVERLAP = "smart_overlap"         # overlap at a sentence boundary


@dataclass
class TextSplitConfig:
    """
    Configuration for text splitting.

    Groups every tunable parameter: target sizes, strategy selection,
    language handling, structure handling, quality thresholds and
    performance/debug switches.
    """
    # Basic sizing (in characters)
    chunk_size: int = 500                   # target chunk size
    min_chunk_size: int = 100              # minimum chunk size
    max_chunk_size: int = 1000             # maximum chunk size
    
    # Strategy selection
    strategy: SplitStrategy = SplitStrategy.HYBRID
    overlap_strategy: OverlapStrategy = OverlapStrategy.SMART_OVERLAP
    overlap_size: int = 50                 # overlap size in characters
    
    # Language handling ('zh', 'en', or 'auto' for detection)
    language: str = "zh"
    preserve_formatting: bool = True        # keep formatting information
    clean_text: bool = True                # normalize whitespace/control chars
    
    # Structure handling
    respect_sentence_boundary: bool = True  # avoid cutting inside sentences
    respect_paragraph_boundary: bool = True # avoid cutting inside paragraphs
    keep_separator: bool = True            # keep separators in the output
    
    # Special content handling
    handle_lists: bool = True              # treat list structures specially
    handle_tables: bool = True             # treat table structures specially
    handle_headers: bool = True            # treat headers specially
    
    # Quality thresholds
    min_meaningful_length: int = 20        # minimum meaningful chunk length
    max_empty_ratio: float = 0.1          # maximum allowed whitespace ratio
    
    # Performance options
    enable_caching: bool = True            # cache split results
    parallel_processing: bool = False       # process in parallel
    
    # Debug options
    verbose: bool = False                  # verbose logging
    collect_metrics: bool = True           # collect quality metrics


@dataclass
class SplitResult:
    """Container for the output of a single split run."""
    chunks: List[str]                      # resulting text chunks
    metadata: Dict[str, Any]               # bookkeeping about the run
    metrics: Dict[str, float]              # quality metrics
    config: "TextSplitConfig"              # configuration that produced the result
    processing_time: float                 # wall-clock seconds spent splitting
    
    def get_statistics(self) -> Dict[str, Any]:
        """Return length statistics over ``chunks`` (empty dict when there
        are no chunks)."""
        if not self.chunks:
            return {}
        
        lengths = [len(chunk) for chunk in self.chunks]
        return {
            'total_chunks': len(self.chunks),
            'avg_length': sum(lengths) / len(lengths),
            'min_length': min(lengths),
            'max_length': max(lengths),
            'total_length': sum(lengths),
            'length_std': self._calculate_std(lengths)
        }
    
    def _calculate_std(self, values: List[float]) -> float:
        """Sample standard deviation (Bessel-corrected); 0.0 for fewer
        than two values."""
        if len(values) <= 1:
            return 0.0
        
        mean = sum(values) / len(values)
        variance = sum((x - mean) ** 2 for x in values) / (len(values) - 1)
        return variance ** 0.5
    
    def to_json(self) -> str:
        """Serialize the result as pretty-printed JSON.

        Bug fix: ``config`` contains ``Enum`` members (strategy fields),
        which ``json.dumps`` cannot encode natively and previously raised
        ``TypeError``.  ``default=str`` stringifies any such value.
        """
        import json
        data = asdict(self)
        return json.dumps(data, ensure_ascii=False, indent=2, default=str)


class TextSplitter(ABC):
    """Abstract base class for all text splitters.

    Provides the shared machinery: pre-compiled regex patterns, text
    cleaning, language detection, chunk validation and basic quality
    metrics.  Concrete subclasses implement :meth:`split`.
    """
    
    def __init__(self, config: "TextSplitConfig"):
        self.config = config
        self._compiled_patterns = self._compile_patterns()
        
        # Warm up the Chinese tokenizer when it may be needed.
        if config.language in ['zh', 'auto'] and JIEBA_AVAILABLE:
            jieba.initialize()
    
    def _compile_patterns(self) -> Dict[str, re.Pattern]:
        """Pre-compile the regex patterns used throughout the splitters."""
        return {
            # Chinese sentence boundaries
            'zh_sentence_end': re.compile(r'[。！？；]'),
            'zh_sentence_split': re.compile(r'([。！？；])\s*'),
            
            # English sentence boundaries
            'en_sentence_end': re.compile(r'[.!?;]'),
            'en_sentence_split': re.compile(r'([.!?;])\s+'),
            
            # Paragraph boundary (blank line)
            'paragraph_split': re.compile(r'\n\s*\n'),
            
            # List items (bulleted and numbered)
            'list_item': re.compile(r'^[\s]*[-•·]\s+', re.MULTILINE),
            'numbered_list': re.compile(r'^[\s]*\d+[.)]\s+', re.MULTILINE),
            
            # Markdown-style headers
            'header': re.compile(r'^#+\s+', re.MULTILINE),
            
            # Horizontal whitespace only — runs of spaces/tabs, but NOT
            # newlines, so paragraph boundaries survive cleaning.
            'whitespace': re.compile(r'[^\S\n]+'),
            'empty_lines': re.compile(r'\n\s*\n\s*\n+'),
            
            # Question/answer pairs ("问：... 答：...")
            'qa_pattern': re.compile(r'问[:：]\s*(.+?)\n答[:：]\s*(.+?)(?=\n问[:：]|\n\n|\Z)', re.DOTALL)
        }
    
    @abstractmethod
    def split(self, text: str) -> "SplitResult":
        """Split *text* and return a :class:`SplitResult`."""
        pass
    
    def _clean_text(self, text: str) -> str:
        """Normalize whitespace and strip control characters.

        Bug fix: the previous implementation collapsed *all* whitespace
        (``\\s+`` → space) before normalizing blank lines, which destroyed
        every newline — paragraph boundaries vanished and the
        ``empty_lines`` pass could never match.  Runs of spaces/tabs are
        now collapsed while newlines are preserved.
        """
        if not self.config.clean_text:
            return text
        
        patterns = self._compiled_patterns
        
        # Collapse horizontal whitespace runs; newlines are kept.
        text = patterns['whitespace'].sub(' ', text)
        
        # Squeeze three or more consecutive newlines down to one blank line.
        text = patterns['empty_lines'].sub('\n\n', text)
        
        # Drop ASCII control characters (tab/newline/CR excluded from the ranges).
        text = re.sub(r'[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]', '', text)
        
        return text.strip()
    
    def _detect_language(self, text: str) -> str:
        """Detect the text language; honors an explicit config setting."""
        if self.config.language != 'auto':
            return self.config.language
        
        # Simple heuristic: ratio of CJK unified ideographs.
        chinese_chars = len(re.findall(r'[\u4e00-\u9fff]', text))
        total_chars = len(text)
        
        if total_chars == 0:
            return 'zh'  # default to Chinese for empty input
        
        if chinese_chars / total_chars > 0.1:
            return 'zh'
        else:
            return 'en'
    
    def _validate_chunk(self, chunk: str) -> bool:
        """Return True when *chunk* is long enough and not mostly whitespace."""
        if len(chunk.strip()) < self.config.min_meaningful_length:
            return False
        
        # Whitespace-ratio check.
        non_whitespace = len(re.sub(r'\s', '', chunk))
        if len(chunk) == 0:
            return False
            
        if non_whitespace / len(chunk) < (1 - self.config.max_empty_ratio):
            return False
        
        return True
    
    def _calculate_basic_metrics(self, chunks: List[str], original_text: str) -> Dict[str, float]:
        """Compute basic quality metrics over *chunks*."""
        if not chunks:
            return {}
        
        lengths = [len(chunk) for chunk in chunks]
        
        metrics = {
            'chunk_count': len(chunks),
            'avg_length': sum(lengths) / len(lengths),
            'length_variance': self._calculate_variance(lengths),
            # May exceed 1.0 when overlap duplicates text.
            'coverage_ratio': sum(lengths) / len(original_text) if original_text else 0,
            'semantic_completeness': self._estimate_semantic_completeness(chunks)
        }
        
        return metrics
    
    def _calculate_variance(self, values: List[float]) -> float:
        """Population variance (0.0 for fewer than two values)."""
        if len(values) <= 1:
            return 0.0
        
        mean = sum(values) / len(values)
        return sum((x - mean) ** 2 for x in values) / len(values)
    
    def _estimate_semantic_completeness(self, chunks: List[str]) -> float:
        """Rough completeness estimate: full credit for chunks ending in a
        sentence terminator, partial credit for balanced parentheses."""
        complete_sentences = 0
        total_chunks = len(chunks)
        
        for chunk in chunks:
            chunk_stripped = chunk.strip()
            # Ends with a sentence terminator?
            if chunk_stripped.endswith(('。', '！', '？', '.', '!', '?')):
                complete_sentences += 1
            # Otherwise, balanced (full- and half-width) parentheses?
            elif (chunk_stripped.count('（') == chunk_stripped.count('）') and 
                  chunk_stripped.count('(') == chunk_stripped.count(')')):
                complete_sentences += 0.8
        
        return complete_sentences / total_chunks if total_chunks > 0 else 0.0


class FixedLengthSplitter(TextSplitter):
    """Splitter that cuts text into fixed-size character windows."""
    
    def split(self, text: str) -> SplitResult:
        """Split *text* into consecutive windows of ``chunk_size`` characters."""
        began = time.time()
        
        cleaned = self._clean_text(text)
        width = self.config.chunk_size
        
        # Slice into consecutive windows; keep only non-blank windows
        # that pass validation, stripped of surrounding whitespace.
        windows = (cleaned[offset:offset + width]
                   for offset in range(0, len(cleaned), width))
        pieces = [w.strip() for w in windows
                  if w.strip() and self._validate_chunk(w)]
        
        # Optionally stitch neighbouring windows together.
        if self.config.overlap_strategy != OverlapStrategy.NO_OVERLAP:
            pieces = self._apply_overlap(pieces)
        
        quality = self._calculate_basic_metrics(pieces, cleaned)
        
        return SplitResult(
            chunks=pieces,
            metadata={'strategy': 'fixed_length'},
            metrics=quality,
            config=self.config,
            processing_time=time.time() - began
        )
    
    def _apply_overlap(self, chunks: List[str]) -> List[str]:
        """Prefix every chunk (except the first) with the last
        ``overlap_size`` characters of its predecessor."""
        width = self.config.overlap_size
        if width <= 0 or len(chunks) < 2:
            return chunks
        
        stitched = chunks[:1]
        for previous, current in zip(chunks, chunks[1:]):
            stitched.append(previous[-width:] + current)
        
        return stitched


class SentenceBoundarySplitter(TextSplitter):
    """Splitter that cuts at sentence boundaries, then packs sentences
    into chunks close to the configured target size."""
    
    def split(self, text: str) -> SplitResult:
        """Split *text* at sentence boundaries and return the result."""
        start_time = time.time()
        
        text = self._clean_text(text)
        language = self._detect_language(text)
        
        # Segment into sentences, then pack into target-sized chunks.
        sentences = self._split_into_sentences(text, language)
        chunks = self._combine_sentences(sentences)
        
        metrics = self._calculate_basic_metrics(chunks, text)
        
        processing_time = time.time() - start_time
        
        return SplitResult(
            chunks=chunks,
            metadata={
                'strategy': 'sentence_boundary',
                'language': language,
                'sentence_count': len(sentences)
            },
            metrics=metrics,
            config=self.config,
            processing_time=processing_time
        )
    
    def _regex_sentence_split(self, text: str, pattern: re.Pattern) -> List[str]:
        """Split *text* with a capturing boundary *pattern*, re-attaching
        each terminator to the sentence it ends.

        ``re.split`` with one capture group yields
        ``[text, punct, text, punct, ..., tail]``.  Pairs are re-joined so
        terminators are not emitted as stand-alone "sentences" — the
        previous English fallback had exactly that defect.
        """
        parts = pattern.split(text)
        
        sentences = []
        for i in range(0, len(parts) - 1, 2):
            sentence = (parts[i] + parts[i + 1]).strip()
            if sentence:
                sentences.append(sentence)
        
        # Trailing text that has no terminator.
        if len(parts) % 2 == 1 and parts[-1].strip():
            sentences.append(parts[-1].strip())
        
        return sentences
    
    def _split_into_sentences(self, text: str, language: str) -> List[str]:
        """Split *text* into sentences for the given *language*."""
        if language == 'zh':
            return self._regex_sentence_split(
                text, self._compiled_patterns['zh_sentence_split'])
        
        # English: prefer NLTK when available, fall back to regex.
        if NLTK_AVAILABLE:
            try:
                import nltk
                return nltk.sent_tokenize(text)
            except Exception:
                # e.g. missing 'punkt' model data — fall through to regex.
                # (Bug fix: was a bare ``except:``.)
                pass
        
        return self._regex_sentence_split(
            text, self._compiled_patterns['en_sentence_split'])
    
    def _combine_sentences(self, sentences: List[str]) -> List[str]:
        """Pack sentences into chunks close to ``chunk_size``."""
        if not sentences:
            return []
        
        chunks = []
        current_chunk = []
        current_length = 0
        
        for sentence in sentences:
            sentence_length = len(sentence)
            
            # A single sentence longer than max_chunk_size must itself
            # be split further.
            if sentence_length > self.config.max_chunk_size:
                # Flush the chunk under construction first.
                if current_chunk:
                    chunks.append(self._join_sentences(current_chunk))
                    current_chunk = []
                    current_length = 0
                
                sub_chunks = self._split_long_sentence(sentence)
                chunks.extend(sub_chunks)
                continue
            
            # Start a new chunk once the target size would be exceeded
            # and the current chunk is already big enough.
            if (current_length + sentence_length > self.config.chunk_size and
                current_chunk and
                current_length >= self.config.min_chunk_size):
                
                chunks.append(self._join_sentences(current_chunk))
                current_chunk = [sentence]
                current_length = sentence_length
            else:
                current_chunk.append(sentence)
                current_length += sentence_length
        
        # Flush the last chunk.
        if current_chunk:
            chunks.append(self._join_sentences(current_chunk))
        
        # Apply the configured overlap strategy.
        if self.config.overlap_strategy != OverlapStrategy.NO_OVERLAP:
            chunks = self._apply_overlap(chunks)
        
        return [chunk for chunk in chunks if self._validate_chunk(chunk)]
    
    def _join_sentences(self, sentences: List[str]) -> str:
        """Concatenate sentences, inserting a space after a Chinese
        terminator when the next sentence does not begin with punctuation."""
        if not sentences:
            return ""
        
        result = sentences[0]
        for sentence in sentences[1:]:
            if (result and result[-1] in '。！？；' and 
                sentence and sentence[0] not in '，。！？；'):
                result += " " + sentence
            else:
                result += sentence
        
        return result
    
    def _split_long_sentence(self, sentence: str) -> List[str]:
        """Split an over-long sentence, preferably at commas."""
        if '，' in sentence or ',' in sentence:
            parts = re.split(r'[，,]', sentence)
            
            chunks = []
            current_chunk = ""
            
            for part in parts:
                part = part.strip()
                if not part:
                    continue
                
                if len(current_chunk) + len(part) > self.config.chunk_size:
                    if current_chunk:
                        chunks.append(current_chunk.strip())
                    current_chunk = part
                else:
                    if current_chunk:
                        # Re-insert the (full-width) comma dropped by the split.
                        current_chunk += "，" + part
                    else:
                        current_chunk = part
            
            if current_chunk:
                chunks.append(current_chunk.strip())
            
            return chunks
        
        # No commas: fall back to fixed-length slicing.
        chunks = []
        for i in range(0, len(sentence), self.config.chunk_size):
            chunk = sentence[i:i + self.config.chunk_size]
            if chunk.strip():
                chunks.append(chunk.strip())
        
        return chunks
    
    def _apply_overlap(self, chunks: List[str]) -> List[str]:
        """Dispatch to the configured overlap strategy."""
        if len(chunks) <= 1:
            return chunks
        
        if self.config.overlap_strategy == OverlapStrategy.FIXED_OVERLAP:
            return self._apply_fixed_overlap(chunks)
        elif self.config.overlap_strategy == OverlapStrategy.SMART_OVERLAP:
            return self._apply_smart_overlap(chunks)
        
        return chunks
    
    def _apply_fixed_overlap(self, chunks: List[str]) -> List[str]:
        """Prefix each chunk with the last ``overlap_size`` characters of
        its predecessor."""
        if not chunks or self.config.overlap_size <= 0:
            return chunks
        
        overlapped_chunks = [chunks[0]]
        
        for i in range(1, len(chunks)):
            prev_chunk = chunks[i-1]
            current_chunk = chunks[i]
            
            overlap_text = prev_chunk[-self.config.overlap_size:]
            overlapped_chunks.append(overlap_text + " " + current_chunk)
        
        return overlapped_chunks
    
    def _apply_smart_overlap(self, chunks: List[str]) -> List[str]:
        """Prefix each chunk with the *last sentence* of its predecessor,
        so the overlap always falls on a sentence boundary."""
        if not chunks:
            return chunks
        
        overlapped_chunks = [chunks[0]]
        
        for i in range(1, len(chunks)):
            prev_chunk = chunks[i-1]
            current_chunk = chunks[i]
            
            language = self._detect_language(prev_chunk)
            prev_sentences = self._split_into_sentences(prev_chunk, language)
            
            if prev_sentences:
                overlap_text = prev_sentences[-1]
                overlapped_chunks.append(overlap_text + " " + current_chunk)
            else:
                overlapped_chunks.append(current_chunk)
        
        return overlapped_chunks


class ParagraphBoundarySplitter(TextSplitter):
    """Splitter that keeps paragraph boundaries intact."""
    
    def split(self, text: str) -> SplitResult:
        """Split *text* along blank-line paragraph boundaries."""
        began = time.time()
        
        cleaned = self._clean_text(text)
        
        # Segment into paragraphs, then pack into target-sized chunks.
        paragraphs = self._split_into_paragraphs(cleaned)
        chunks = self._combine_paragraphs(paragraphs)
        
        quality = self._calculate_basic_metrics(chunks, cleaned)
        
        elapsed = time.time() - began
        
        return SplitResult(
            chunks=chunks,
            metadata={
                'strategy': 'paragraph_boundary',
                'paragraph_count': len(paragraphs)
            },
            metrics=quality,
            config=self.config,
            processing_time=elapsed
        )
    
    def _split_into_paragraphs(self, text: str) -> List[str]:
        """Return the non-empty, stripped paragraphs of *text*."""
        raw = self._compiled_patterns['paragraph_split'].split(text)
        return [piece.strip() for piece in raw if piece.strip()]
    
    def _combine_paragraphs(self, paragraphs: List[str]) -> List[str]:
        """Pack paragraphs into chunks near the configured target size."""
        if not paragraphs:
            return []
        
        chunks = []
        pending = []
        pending_len = 0
        
        for para in paragraphs:
            size = len(para)
            
            # An oversized paragraph is delegated to the sentence splitter.
            if size > self.config.max_chunk_size:
                if pending:
                    chunks.append('\n\n'.join(pending))
                    pending, pending_len = [], 0
                
                helper = SentenceBoundarySplitter(self.config)
                chunks.extend(helper.split(para).chunks)
                continue
            
            # Close the pending chunk once the target size would be
            # exceeded and it is already large enough.
            needs_new_chunk = (
                pending
                and pending_len + size > self.config.chunk_size
                and pending_len >= self.config.min_chunk_size
            )
            if needs_new_chunk:
                chunks.append('\n\n'.join(pending))
                pending, pending_len = [para], size
            else:
                pending.append(para)
                pending_len += size
        
        # Flush whatever is left.
        if pending:
            chunks.append('\n\n'.join(pending))
        
        return [c for c in chunks if self._validate_chunk(c)]


class SemanticAwareSplitter(TextSplitter):
    """Semantics-aware splitter.

    Analyzes document structure (headers, lists, Q/A pairs), performs a
    structure-preserving segmentation, then post-processes chunks toward
    semantic completeness.
    """
    
    def __init__(self, config: TextSplitConfig):
        super().__init__(config)
        # Plain-text passages are delegated to the sentence splitter.
        self.sentence_splitter = SentenceBoundarySplitter(config)
    
    def split(self, text: str) -> SplitResult:
        """Split *text* with structure detection and semantic cleanup."""
        start_time = time.time()
        
        text = self._clean_text(text)
        
        # Detect structural features of the document.
        structure_info = self._analyze_document_structure(text)
        
        # Segment according to the detected structure.
        chunks = self._structure_aware_split(text, structure_info)
        
        # Post-process chunks for semantic completeness.
        chunks = self._semantic_optimization(chunks)
        
        metrics = self._calculate_advanced_metrics(chunks, text, structure_info)
        
        processing_time = time.time() - start_time
        
        return SplitResult(
            chunks=chunks,
            metadata={
                'strategy': 'semantic_aware',
                'structure_info': structure_info
            },
            metrics=metrics,
            config=self.config,
            processing_time=processing_time
        )
    
    def _analyze_document_structure(self, text: str) -> Dict[str, Any]:
        """Collect structural features: headers, lists, paragraph count
        and the positions of any question/answer pairs."""
        structure = {
            'has_headers': bool(self._compiled_patterns['header'].search(text)),
            'has_lists': bool(self._compiled_patterns['list_item'].search(text)),
            'has_numbered_lists': bool(self._compiled_patterns['numbered_list'].search(text)),
            'paragraph_count': len(self._compiled_patterns['paragraph_split'].split(text)),
            'question_answer_pairs': self._detect_qa_pairs(text)
        }
        
        return structure
    
    def _detect_qa_pairs(self, text: str) -> List[Dict[str, Any]]:
        """Find "问：.../答：..." pairs; each entry carries the question and
        answer text plus the match's start/end offsets (ints)."""
        qa_pairs = []
        
        qa_pattern = self._compiled_patterns['qa_pattern']
        
        for match in qa_pattern.finditer(text):
            qa_pairs.append({
                'question': match.group(1).strip(),
                'answer': match.group(2).strip(),
                'start': match.start(),
                'end': match.end()
            })
        
        return qa_pairs
    
    def _structure_aware_split(self, text: str, structure_info: Dict[str, Any]) -> List[str]:
        """Segment *text* according to the detected structure."""
        chunks = []
        
        # Q/A pairs get dedicated handling; everything else falls back to
        # sentence-boundary splitting.
        if structure_info['question_answer_pairs']:
            chunks.extend(self._handle_qa_pairs(text, structure_info['question_answer_pairs']))
        else:
            result = self.sentence_splitter.split(text)
            chunks = result.chunks
        
        return chunks
    
    def _handle_qa_pairs(self, text: str, qa_pairs: List[Dict[str, Any]]) -> List[str]:
        """Emit each Q/A pair as one chunk (splitting long answers) and
        sentence-split the text between/around the pairs."""
        chunks = []
        last_end = 0
        
        for qa_pair in qa_pairs:
            # Text preceding this Q/A pair.
            if qa_pair['start'] > last_end:
                before_text = text[last_end:qa_pair['start']].strip()
                if before_text:
                    result = self.sentence_splitter.split(before_text)
                    chunks.extend(result.chunks)
            
            # Keep question and answer together in one chunk.
            qa_text = f"问：{qa_pair['question']}\n答：{qa_pair['answer']}"
            
            # Oversized pair: keep the question intact, split the answer.
            if len(qa_text) > self.config.max_chunk_size:
                question_part = f"问：{qa_pair['question']}"
                answer_parts = self._split_long_answer(qa_pair['answer'])
                
                for i, answer_part in enumerate(answer_parts):
                    if i == 0:
                        chunk = f"{question_part}\n答：{answer_part}"
                    else:
                        chunk = f"答（续）：{answer_part}"
                    chunks.append(chunk)
            else:
                chunks.append(qa_text)
            
            last_end = qa_pair['end']
        
        # Text after the final Q/A pair.
        if last_end < len(text):
            remaining_text = text[last_end:].strip()
            if remaining_text:
                result = self.sentence_splitter.split(remaining_text)
                chunks.extend(result.chunks)
        
        return chunks
    
    def _split_long_answer(self, answer: str) -> List[str]:
        """Split a long answer with a sentence splitter using a slightly
        smaller target size."""
        temp_config = TextSplitConfig(
            chunk_size=self.config.chunk_size - 50,  # leave room for the question prefix
            strategy=SplitStrategy.SENTENCE_BOUNDARY
        )
        temp_splitter = SentenceBoundarySplitter(temp_config)
        result = temp_splitter.split(answer)
        return result.chunks
    
    def _semantic_optimization(self, chunks: List[str]) -> List[str]:
        """Run each chunk through a completeness check, repairing chunks
        that look semantically incomplete."""
        optimized_chunks = []
        
        for chunk in chunks:
            if self._is_semantically_complete(chunk):
                optimized_chunks.append(chunk)
            else:
                # Try to repair an incomplete chunk.
                optimized_chunk = self._optimize_incomplete_chunk(chunk)
                optimized_chunks.append(optimized_chunk)
        
        return optimized_chunks
    
    def _is_semantically_complete(self, chunk: str) -> bool:
        """Heuristic completeness check: terminator at the end, balanced
        full-width parentheses, even quote count."""
        chunk = chunk.strip()
        
        # Must end with a sentence terminator.
        if not chunk.endswith(('。', '！', '？', '.', '!', '?')):
            return False
        
        # Unbalanced full-width parentheses signal a cut-off span.
        if chunk.count('（') != chunk.count('）'):
            return False
        
        # Odd quote count signals an unterminated quotation.
        if chunk.count('"') % 2 != 0:
            return False
        
        return True
    
    def _optimize_incomplete_chunk(self, chunk: str) -> str:
        """Trim a trailing sentence fragment, re-terminating with '。'.

        NOTE(review): the re-join replaces every original terminator with
        '。', and only fires when the chunk already ends exactly on a
        terminator — so fragments mid-chunk are left as-is.
        """
        chunk = chunk.strip()
        
        # Drop an empty trailing piece left after the final terminator.
        sentences = re.split(r'[。！？.!?]', chunk)
        if len(sentences) > 1 and not sentences[-1].strip():
            sentences = sentences[:-1]
            chunk = '。'.join(sentences) + '。'
        
        return chunk
    
    def _calculate_advanced_metrics(self, chunks: List[str], original_text: str, 
                                  structure_info: Dict[str, Any]) -> Dict[str, float]:
        """Basic metrics plus semantic/structure/coherence scores."""
        basic_metrics = self._calculate_basic_metrics(chunks, original_text)
        
        semantic_metrics = {
            'semantic_completeness_score': self._calculate_semantic_score(chunks),
            'structure_preservation_score': self._calculate_structure_score(chunks, structure_info),
            'coherence_score': self._calculate_coherence_score(chunks)
        }
        
        basic_metrics.update(semantic_metrics)
        return basic_metrics
    
    def _calculate_semantic_score(self, chunks: List[str]) -> float:
        """Fraction of chunks that pass the completeness heuristic."""
        if not chunks:
            return 0.0
        
        complete_chunks = sum(1 for chunk in chunks if self._is_semantically_complete(chunk))
        return complete_chunks / len(chunks)
    
    def _calculate_structure_score(self, chunks: List[str], structure_info: Dict[str, Any]) -> float:
        """Score how well Q/A pairs survived splitting (1.0 when there
        were none)."""
        score = 1.0
        
        # Count chunks that still contain a full question + answer.
        qa_pairs = structure_info.get('question_answer_pairs', [])
        if qa_pairs:
            preserved_pairs = 0
            for chunk in chunks:
                if '问：' in chunk and '答：' in chunk:
                    preserved_pairs += 1
            
            if len(qa_pairs) > 0:
                score *= preserved_pairs / len(qa_pairs)
        
        return score
    
    def _calculate_coherence_score(self, chunks: List[str]) -> float:
        """Fraction of adjacent chunk pairs sharing vocabulary (jieba
        token overlap when available, raw character overlap otherwise)."""
        if len(chunks) <= 1:
            return 1.0
        
        coherent_transitions = 0
        total_transitions = len(chunks) - 1
        
        for i in range(len(chunks) - 1):
            current_chunk = chunks[i]
            next_chunk = chunks[i + 1]
            
            if JIEBA_AVAILABLE:
                current_words = set(jieba.lcut(current_chunk))
                next_words = set(jieba.lcut(next_chunk))
                
                # Jaccard-style token overlap.
                overlap = len(current_words & next_words)
                total_unique = len(current_words | next_words)
                
                if total_unique > 0 and overlap / total_unique > 0.1:
                    coherent_transitions += 1
            else:
                # Fallback: shared distinct characters.
                if len(set(current_chunk) & set(next_chunk)) > 10:
                    coherent_transitions += 1
        
        return coherent_transitions / total_transitions if total_transitions > 0 else 1.0


class HybridSplitter(TextSplitter):
    """Hybrid splitter: inspects the text and delegates to whichever
    concrete strategy fits it best, then post-processes the chunks."""
    
    def __init__(self, config: TextSplitConfig):
        super().__init__(config)
        self.sentence_splitter = SentenceBoundarySplitter(config)
        self.paragraph_splitter = ParagraphBoundarySplitter(config)
        self.semantic_splitter = SemanticAwareSplitter(config)
    
    def split(self, text: str) -> SplitResult:
        """Pick the best strategy for *text*, split, then post-process.

        Bug fix: metrics are now recomputed on the final (post-processed)
        chunks; previously the delegate splitter's metrics were reported
        even though the returned chunks had been merged/filtered
        afterwards.
        """
        start_time = time.time()
        
        text = self._clean_text(text)
        
        # Analyze the text and choose a strategy.
        text_features = self._analyze_text_features(text)
        best_strategy = self._select_best_strategy(text_features)
        
        # Delegate to the chosen strategy.
        if best_strategy == 'semantic':
            result = self.semantic_splitter.split(text)
        elif best_strategy == 'paragraph':
            result = self.paragraph_splitter.split(text)
        else:
            result = self.sentence_splitter.split(text)
        
        # Post-processing: merge undersized chunks, drop invalid ones.
        optimized_chunks = self._post_process_optimization(result.chunks, text_features)
        
        # Recompute quality metrics on the chunks actually returned.
        metrics = self._calculate_basic_metrics(optimized_chunks, text)
        
        processing_time = time.time() - start_time
        
        return SplitResult(
            chunks=optimized_chunks,
            metadata={
                'strategy': 'hybrid',
                'selected_strategy': best_strategy,
                'text_features': text_features
            },
            metrics=metrics,
            config=self.config,
            processing_time=processing_time
        )
    
    def _analyze_text_features(self, text: str) -> Dict[str, Any]:
        """Collect cheap structural features used for strategy selection."""
        features = {
            'length': len(text),
            'paragraph_count': len(self._compiled_patterns['paragraph_split'].split(text)),
            'avg_paragraph_length': 0,
            'has_qa_pairs': bool(re.search(r'问[:：]', text)),
            'has_lists': bool(self._compiled_patterns['list_item'].search(text)),
            'has_headers': bool(self._compiled_patterns['header'].search(text)),
            'sentence_count': len(re.split(r'[。！？.!?]', text)),
            'language': self._detect_language(text)
        }
        
        # Mean paragraph length.
        paragraphs = self._compiled_patterns['paragraph_split'].split(text)
        if paragraphs:
            features['avg_paragraph_length'] = sum(len(p) for p in paragraphs) / len(paragraphs)
        
        return features
    
    def _select_best_strategy(self, features: Dict[str, Any]) -> str:
        """Rule-based strategy selection."""
        # Clear Q/A structure → semantic-aware splitting.
        if features['has_qa_pairs']:
            return 'semantic'
        
        # Clear paragraph structure of moderate size → paragraph splitting.
        if (features['paragraph_count'] > 2 and 
            100 <= features['avg_paragraph_length'] <= 800):
            return 'paragraph'
        
        # Default: sentence-boundary splitting.
        return 'sentence'
    
    def _post_process_optimization(self, chunks: List[str], features: Dict[str, Any]) -> List[str]:
        """Merge undersized chunks into their predecessor (when the merge
        stays within max_chunk_size) and drop chunks failing validation."""
        optimized_chunks = []
        
        for chunk in chunks:
            if len(chunk) < self.config.min_chunk_size:
                # Try to absorb a short chunk into the previous one.
                if optimized_chunks and len(optimized_chunks[-1]) + len(chunk) <= self.config.max_chunk_size:
                    optimized_chunks[-1] = optimized_chunks[-1] + "\n" + chunk
                    continue
            
            if self._validate_chunk(chunk):
                optimized_chunks.append(chunk)
        
        return optimized_chunks


class TextSplitterFactory:
    """Factory mapping each SplitStrategy to its splitter implementation."""

    @staticmethod
    def create_splitter(config: TextSplitConfig) -> TextSplitter:
        """Instantiate the splitter matching ``config.strategy``.

        Args:
            config: Split configuration whose ``strategy`` selects the
                implementation.

        Returns:
            A splitter instance constructed with ``config``.

        Raises:
            ValueError: If the strategy has no registered implementation.
        """
        registry = {
            SplitStrategy.FIXED_LENGTH: FixedLengthSplitter,
            SplitStrategy.SENTENCE_BOUNDARY: SentenceBoundarySplitter,
            SplitStrategy.PARAGRAPH_BOUNDARY: ParagraphBoundarySplitter,
            SplitStrategy.SEMANTIC_AWARE: SemanticAwareSplitter,
            SplitStrategy.HYBRID: HybridSplitter,
        }
        splitter_cls = registry.get(config.strategy)
        if splitter_cls is None:
            raise ValueError(f"Unsupported strategy: {config.strategy}")
        return splitter_cls(config)


# ================================
# 缓存系统
# ================================

class SplitCache:
    """In-memory cache of split results with least-used eviction.

    Keys combine an MD5 digest of the text with an MD5 digest of the
    stringified config, so identical (text, config) pairs share an entry.
    """

    def __init__(self, max_size: int = 100):
        # key -> cached split result
        self.cache = {}
        self.max_size = max_size
        # key -> number of accesses; drives eviction ordering
        self.access_count = {}

    def get_cache_key(self, text: str, config: "TextSplitConfig") -> str:
        """Build a deterministic cache key for a (text, config) pair."""
        text_digest = hashlib.md5(text.encode()).hexdigest()
        config_digest = hashlib.md5(str(config).encode()).hexdigest()
        return f"{text_digest}_{config_digest}"

    def get(self, text: str, config: "TextSplitConfig") -> "Optional[SplitResult]":
        """Return the cached result for (text, config), or None on a miss."""
        key = self.get_cache_key(text, config)
        try:
            result = self.cache[key]
        except KeyError:
            return None
        # Count the hit so frequently used entries survive eviction.
        self.access_count[key] = self.access_count.get(key, 0) + 1
        return result

    def put(self, text: str, config: "TextSplitConfig", result: "SplitResult"):
        """Store a result, evicting the least-used entry when full."""
        if len(self.cache) >= self.max_size:
            self._evict_least_used()
        key = self.get_cache_key(text, config)
        self.cache[key] = result
        self.access_count[key] = 1

    def _evict_least_used(self):
        """Drop the entry with the fewest recorded accesses."""
        if not self.cache:
            return
        victim = min(self.access_count, key=self.access_count.get)
        self.cache.pop(victim)
        self.access_count.pop(victim)

# Module-level shared cache instance used by split_text_optimized
_split_cache = SplitCache(max_size=100)


# ================================
# 公共接口函数
# ================================

def split_text_optimized(text: str, 
                        strategy: Union[str, SplitStrategy] = "hybrid",
                        chunk_size: int = 500,
                        config: Optional[TextSplitConfig] = None,
                        **kwargs) -> SplitResult:
    """
    Optimized text splitting - the main public entry point.

    Args:
        text: Text to split.
        strategy: Split strategy ("fixed_length", "sentence_boundary",
            "paragraph_boundary", "semantic_aware", "hybrid"), as a name
            or a SplitStrategy member.
        chunk_size: Target chunk size in characters.
        config: Custom configuration object; when provided it overrides
            ``strategy``, ``chunk_size`` and ``kwargs``.
        **kwargs: Additional TextSplitConfig fields.

    Returns:
        SplitResult holding chunks, metadata and quality metrics.

    Raises:
        ValueError: If ``strategy`` is not a recognized strategy name.

    Example:
        >>> result = split_text_optimized("长文本内容...", strategy="hybrid", chunk_size=400)
        >>> print(f"分割成 {len(result.chunks)} 个块")
    """
    if config is None:
        # Build a configuration from the loose parameters.
        if isinstance(strategy, str):
            strategy = SplitStrategy(strategy)  # raises ValueError for unknown names

        config = TextSplitConfig(chunk_size=chunk_size, strategy=strategy, **kwargs)

    # Cache lookup. Compare against None explicitly: a falsy-but-valid
    # cached result (e.g. an empty split) must not trigger recomputation,
    # which the original truthiness check could do.
    if config.enable_caching:
        cached_result = _split_cache.get(text, config)
        if cached_result is not None:
            if config.verbose:
                logger.debug("使用缓存的分割结果")
            return cached_result

    # Create the strategy-specific splitter and run the split.
    splitter = TextSplitterFactory.create_splitter(config)
    result = splitter.split(text)

    # Store in cache for subsequent identical requests.
    if config.enable_caching:
        _split_cache.put(text, config, result)

    if config.verbose:
        logger.info(f"文本分割完成: {len(result.chunks)}个块, "
                   f"策略: {config.strategy.value}, "
                   f"质量得分: {result.metrics.get('overall_score', 0):.2f}")

    return result


def split_text_simple(text: str, 
                     chunk_size: int = 500,
                     strategy: str = "hybrid") -> List[str]:
    """Convenience wrapper that returns only the list of text chunks.

    Args:
        text: Text to split.
        chunk_size: Target chunk size in characters.
        strategy: Name of the split strategy to use.

    Returns:
        List of text chunks.

    Example:
        >>> chunks = split_text_simple("长文本内容...", chunk_size=400)
        >>> print(f"分割成 {len(chunks)} 个块")
    """
    return split_text_optimized(text, strategy=strategy, chunk_size=chunk_size).chunks


def split_text(text: str, chunk_size: int = 200) -> List[str]:
    """Backward-compatible split_text.

    Keeps the original function signature while delegating to the
    optimized implementation internally.

    Args:
        text: Text to split.
        chunk_size: Target chunk size in characters.

    Returns:
        List of text chunks.

    Example:
        >>> chunks = split_text("长文本内容...", chunk_size=200)
        >>> print(f"分割成 {len(chunks)} 个块")
    """
    logger.info(f"使用优化的文本分割器（向后兼容模式），块大小: {chunk_size}")

    # Hybrid strategy with parameters tuned to approximate the legacy
    # fixed-size behaviour: small minimum, generous maximum, light overlap.
    compat_config = TextSplitConfig(
        chunk_size=chunk_size,
        min_chunk_size=max(50, chunk_size // 4),
        max_chunk_size=chunk_size * 2,
        strategy=SplitStrategy.HYBRID,
        overlap_strategy=OverlapStrategy.SMART_OVERLAP,
        overlap_size=min(50, chunk_size // 10),
        clean_text=True,
        verbose=False
    )

    return split_text_optimized(text, config=compat_config).chunks


# ================================
# 便利函数
# ================================

def get_optimal_config_for_scenario(scenario: str) -> TextSplitConfig:
    """Return a tuned TextSplitConfig for a known usage scenario.

    Args:
        scenario: One of "hr_documents", "technical_docs", "qa_knowledge",
            "long_articles", "conversations". Unknown values yield the
            default configuration.

    Returns:
        A freshly constructed TextSplitConfig.
    """
    # Scenario -> constructor kwargs. Storing kwargs instead of built
    # objects means only the requested config is instantiated; the
    # original eagerly built all five configs on every call.
    scenario_params = {
        # HR document scenario
        "hr_documents": dict(
            chunk_size=600,
            strategy=SplitStrategy.SEMANTIC_AWARE,
            overlap_strategy=OverlapStrategy.SMART_OVERLAP,
            overlap_size=50,
            handle_lists=True,
            handle_tables=True,
            clean_text=True
        ),

        # Technical documentation scenario
        "technical_docs": dict(
            chunk_size=800,
            strategy=SplitStrategy.PARAGRAPH_BOUNDARY,
            overlap_strategy=OverlapStrategy.FIXED_OVERLAP,
            overlap_size=100,
            preserve_formatting=True,
            handle_headers=True
        ),

        # Q&A knowledge-base scenario
        "qa_knowledge": dict(
            chunk_size=400,
            strategy=SplitStrategy.SEMANTIC_AWARE,
            overlap_strategy=OverlapStrategy.NO_OVERLAP,
            handle_lists=True,
            min_meaningful_length=50
        ),

        # Long-article scenario
        "long_articles": dict(
            chunk_size=1000,
            strategy=SplitStrategy.HYBRID,
            overlap_strategy=OverlapStrategy.SMART_OVERLAP,
            overlap_size=100,
            respect_paragraph_boundary=True
        ),

        # Conversation-transcript scenario
        "conversations": dict(
            chunk_size=300,
            strategy=SplitStrategy.SENTENCE_BOUNDARY,
            overlap_strategy=OverlapStrategy.FIXED_OVERLAP,
            overlap_size=30,
            clean_text=True
        ),
    }

    # Empty kwargs for unknown scenarios -> all-default config, matching
    # the original fallback behavior.
    return TextSplitConfig(**scenario_params.get(scenario, {}))


def batch_split_texts(texts: List[str], config: Optional[TextSplitConfig] = None) -> List[SplitResult]:
    """Split many texts, returning results in the input order.

    Failures are isolated per text: a text that raises yields an empty
    SplitResult carrying the error message instead of aborting the batch.

    Args:
        texts: Texts to split.
        config: Shared configuration; defaults to ``TextSplitConfig()``.
            (The original annotated this as plain ``TextSplitConfig``
            despite defaulting to None; fixed to Optional.)

    Returns:
        One SplitResult per input text, in the original order.
    """
    if config is None:
        config = TextSplitConfig()

    indexed_results: List[Tuple[int, SplitResult]] = []

    # Process shortest texts first; results are re-ordered afterwards so
    # callers still receive them in input order.
    # NOTE(review): the cache keys on exact text content, so sorting by
    # length only helps when duplicate texts exist — confirm the intent.
    ordered = sorted(enumerate(texts), key=lambda item: len(item[1]))

    for original_index, text in ordered:
        try:
            result = split_text_optimized(text, config=config)
        except Exception as e:
            logger.error(f"分割文本 {original_index} 失败: {str(e)}")
            # Placeholder result recording the failure for this text.
            result = SplitResult(
                chunks=[],
                metadata={'error': str(e)},
                metrics={},
                config=config,
                processing_time=0
            )
        indexed_results.append((original_index, result))

    # Restore the original input order.
    indexed_results.sort(key=lambda item: item[0])
    return [result for _, result in indexed_results]


# ================================
# 演示和测试函数
# ================================

def demo_text_splitting():
    """Demonstrate the text splitter: all strategies plus the legacy API."""
    
    print("🎯 智能文本分割器演示")
    print("="*60)
    
    # Sample HR-handbook text containing headers, a Q&A pair and a list,
    # so every strategy has structure to work with.
    test_text = """
    员工手册第三章：薪酬福利制度
    
    3.1 薪酬结构
    公司实行多元化薪酬体系，包括基本工资、绩效奖金、津贴补贴等。
    
    问：试用期工资如何计算？
    答：试用期工资不低于正式工资的80%，具体标准如下：
    - T1级别：试用期8000-9600元，转正后10000-12000元
    - T2级别：试用期12000-14400元，转正后15000-18000元
    - T3级别：试用期16000-20000元，转正后20000-25000元
    
    3.2 年假制度
    员工年假天数根据工作年限确定：工作满1年不满10年的，年假5天；工作满10年不满20年的，年假10天；工作满20年的，年假15天。
    年假应提前15天申请，经部门主管批准后方可休假。
    """
    
    # 1. Run each split strategy on the same sample text.
    strategies = ["fixed_length", "sentence_boundary", "paragraph_boundary", "semantic_aware", "hybrid"]
    
    for strategy in strategies:
        print(f"\n📋 {strategy.upper()} 策略演示:")
        print("-" * 40)
        
        try:
            result = split_text_optimized(test_text, strategy=strategy, chunk_size=300)
            
            print(f"分割结果: {len(result.chunks)} 个块")
            print(f"处理时间: {result.processing_time:.3f}秒")
            
            if result.metrics:
                print(f"语义完整性: {result.metrics.get('semantic_completeness', 0):.2f}")
            
            # Show only the first two chunks, truncated to 100 chars each.
            for i, chunk in enumerate(result.chunks[:2]):
                preview = chunk[:100] + "..." if len(chunk) > 100 else chunk
                print(f"  块{i+1}: {preview}")
                
        except Exception as e:
            print(f"❌ {strategy} 策略失败: {str(e)}")
    
    # 2. Exercise the backward-compatible split_text() interface.
    print(f"\n🔄 向后兼容演示:")
    print("-" * 40)
    
    original_chunks = split_text(test_text, chunk_size=200)
    print(f"原始接口结果: {len(original_chunks)} 个块")
    
    # Show the first chunk, truncated to 100 chars.
    if original_chunks:
        preview = original_chunks[0][:100] + "..." if len(original_chunks[0]) > 100 else original_chunks[0]
        print(f"第一个块: {preview}")
    
    print("\n🎉 演示完成!")


if __name__ == "__main__":
    # Run the demo when executed as a script
    demo_text_splitting()
    
    print("\n" + "="*60)
    print("💡 使用说明:")
    print("1. 使用 split_text() 函数完全兼容原有接口")
    print("2. 使用 split_text_optimized() 获得更多功能和控制")
    print("3. 使用 get_optimal_config_for_scenario() 获取场景优化配置")
    print("4. 查看详细文档: 智能文本分割优化完整讲义.md")
    print("="*60)
