"""文本分割器 - 语义感知的文档分块工具"""

import re
import asyncio
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass
from loguru import logger

# Optional NLP dependencies; a warning is logged if NLTK is unavailable.
try:
    import nltk
    from nltk.tokenize import sent_tokenize, word_tokenize
    from nltk.corpus import stopwords
except ImportError as e:
    logger.warning(f"NLTK库导入失败: {e}")


@dataclass
class ChunkMetadata:
    """Metadata describing a single chunk produced by the splitter."""
    chunk_id: str               # stable identifier, e.g. "chunk_0"
    start_pos: int              # approximate start offset in the original text (find-based, may fall back to 0)
    end_pos: int                # start_pos + chunk length before overlap is prepended
    word_count: int             # whitespace-separated token count
    sentence_count: int         # rough count from a punctuation split
    overlap_with_previous: int  # characters copied from the tail of the previous chunk
    semantic_score: float       # heuristic coherence score, roughly in [0, 1]
    language: str               # language tag copied from the splitter configuration
    

class SemanticTextSplitter:
    """语义感知的文本分割器"""
    
    def __init__(self, 
                 chunk_size: int = 1000,
                 chunk_overlap: int = 200,
                 min_chunk_size: int = 100,
                 max_chunk_size: int = 2000,
                 language: str = 'chinese'):
        """Configure the splitter.

        Args:
            chunk_size: Target chunk size in characters.
            chunk_overlap: Characters of context shared between neighboring chunks.
            min_chunk_size: Smallest chunk considered worth keeping.
            max_chunk_size: Hard upper bound on a chunk's size.
            language: Primary document language ('chinese', 'english' or 'mixed').
        """
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.min_chunk_size = min_chunk_size
        self.max_chunk_size = max_chunk_size
        self.language = language

        # Strategies ordered from most to least structure-aware; the first
        # one that yields valid chunks wins.
        self.split_strategies = [
            self._split_by_sections,
            self._split_by_paragraphs,
            self._split_by_sentences,
            self._split_by_words,
            self._split_by_characters,  # last resort
        ]

        # Regexes that identify section headings (matched against stripped lines).
        self.section_patterns = [
            r'^#{1,6}\s+.+$',           # markdown heading
            r'^第[一二三四五六七八九十\d]+[章节部分]',  # Chinese chapter heading
            r'^Chapter\s+\d+',          # English chapter heading
            r'^\d+\.\s+.+$',           # numbered heading
            r'^[A-Z][A-Z\s]{2,}$'       # ALL-CAPS heading
        ]

        # Regexes that mark paragraph boundaries, tried in order.
        self.paragraph_separators = [
            r'\n\s*\n',                 # blank line
            r'\n\s*[-=]{3,}\s*\n',      # horizontal rule
            r'\n\s*\*{3,}\s*\n'         # asterisk divider
        ]

        # Sentence-terminating punctuation classes, keyed by language.
        self.sentence_endings = {
            'chinese': r'[。！？；]',
            'english': r'[.!?;]',
            'mixed': r'[。！？；.!?;]'
        }

        # Stopwords (Chinese + English) used by the semantic-score heuristic.
        self.stop_words = {
            '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个',
            '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好',
            'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of',
            'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have',
            'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should'
        }
    
    async def split_text(self, text: str, 
                        metadata: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """Split *text* into semantically coherent chunks.

        Tries each splitting strategy in priority order and falls back to a
        plain character split when none produces valid chunks. On unexpected
        failure the whole text is returned as a single chunk carrying the
        error message in its metadata.

        Args:
            text: Raw document text.
            metadata: Currently unused; accepted for interface compatibility.

        Returns:
            List of dicts with 'content' and 'metadata' keys.
        """
        try:
            # Texts below the minimum size come back untouched as one chunk.
            if not text or len(text.strip()) < self.min_chunk_size:
                single_meta = ChunkMetadata(
                    chunk_id='chunk_0',
                    start_pos=0,
                    end_pos=len(text),
                    word_count=len(text.split()),
                    sentence_count=1,
                    overlap_with_previous=0,
                    semantic_score=1.0,
                    language=self.language
                )
                return [{'content': text, 'metadata': single_meta.__dict__}]

            cleaned = await self._preprocess_text(text)

            # Walk the strategy ladder; keep the first validated result.
            chunks = None
            for strategy in self.split_strategies:
                try:
                    chunks = await strategy(cleaned)
                except Exception as e:
                    logger.warning(f"分割策略失败: {strategy.__name__} - {e}")
                    continue
                if chunks and self._validate_chunks(chunks):
                    break

            if not chunks:
                # Every strategy failed outright — raw character slicing.
                chunks = await self._split_by_characters(cleaned)

            final_chunks = await self._post_process_chunks(chunks, text)

            logger.debug(f"文本分割完成: {len(text)} 字符 -> {len(final_chunks)} 个分块")
            return final_chunks

        except Exception as e:
            logger.error(f"文本分割失败: {e}")
            # Degrade gracefully: hand back the original text as one chunk.
            return [{
                'content': text,
                'metadata': {
                    'chunk_id': 'chunk_0',
                    'error': str(e)
                }
            }]
    
    async def _preprocess_text(self, text: str) -> str:
        """预处理文本"""
        try:
            # 标准化换行符
            text = re.sub(r'\r\n|\r', '\n', text)
            
            # 清理多余空白
            text = re.sub(r'[ \t]+', ' ', text)  # 多个空格/制表符
            text = re.sub(r'\n[ \t]+', '\n', text)  # 行首空白
            text = re.sub(r'[ \t]+\n', '\n', text)  # 行尾空白
            
            # 标准化段落分隔
            text = re.sub(r'\n{3,}', '\n\n', text)  # 多个换行符
            
            return text.strip()
            
        except Exception as e:
            logger.error(f"文本预处理失败: {e}")
            return text
    
    async def _split_by_sections(self, text: str) -> List[str]:
        """按章节分割"""
        try:
            sections = []
            current_section = ""
            
            lines = text.split('\n')
            
            for line in lines:
                # 检查是否是章节标题
                is_section_header = any(
                    re.match(pattern, line.strip(), re.MULTILINE)
                    for pattern in self.section_patterns
                )
                
                if is_section_header and current_section.strip():
                    # 保存当前章节
                    if len(current_section.strip()) >= self.min_chunk_size:
                        sections.append(current_section.strip())
                    current_section = line + '\n'
                else:
                    current_section += line + '\n'
            
            # 添加最后一个章节
            if current_section.strip():
                sections.append(current_section.strip())
            
            # 如果章节太大，进一步分割
            final_sections = []
            for section in sections:
                if len(section) <= self.max_chunk_size:
                    final_sections.append(section)
                else:
                    # 递归分割大章节
                    sub_chunks = await self._split_by_paragraphs(section)
                    final_sections.extend(sub_chunks)
            
            return final_sections if final_sections else [text]
            
        except Exception as e:
            logger.error(f"按章节分割失败: {e}")
            raise
    
    async def _split_by_paragraphs(self, text: str) -> List[str]:
        """按段落分割"""
        try:
            # 按段落分隔符分割
            paragraphs = []
            for separator in self.paragraph_separators:
                if re.search(separator, text):
                    paragraphs = re.split(separator, text)
                    break
            
            if not paragraphs:
                # 简单的双换行分割
                paragraphs = text.split('\n\n')
            
            # 清理空段落
            paragraphs = [p.strip() for p in paragraphs if p.strip()]
            
            # 合并小段落，分割大段落
            chunks = []
            current_chunk = ""
            
            for paragraph in paragraphs:
                # 如果当前分块加上新段落不超过大小限制
                if len(current_chunk) + len(paragraph) + 2 <= self.chunk_size:
                    if current_chunk:
                        current_chunk += '\n\n' + paragraph
                    else:
                        current_chunk = paragraph
                else:
                    # 保存当前分块
                    if current_chunk.strip():
                        chunks.append(current_chunk.strip())
                    
                    # 检查段落是否太大
                    if len(paragraph) > self.max_chunk_size:
                        # 分割大段落
                        sub_chunks = await self._split_by_sentences(paragraph)
                        chunks.extend(sub_chunks[:-1])  # 除了最后一个
                        current_chunk = sub_chunks[-1] if sub_chunks else ""
                    else:
                        current_chunk = paragraph
            
            # 添加最后一个分块
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
            
            return chunks if chunks else [text]
            
        except Exception as e:
            logger.error(f"按段落分割失败: {e}")
            raise
    
    async def _split_by_sentences(self, text: str) -> List[str]:
        """按句子分割"""
        try:
            # 根据语言选择句子结束符
            ending_pattern = self.sentence_endings.get(self.language, 
                                                     self.sentence_endings['mixed'])
            
            # 分割句子
            sentences = re.split(f'({ending_pattern})', text)
            
            # 重新组合句子（包含标点）
            complete_sentences = []
            for i in range(0, len(sentences) - 1, 2):
                if i + 1 < len(sentences):
                    sentence = sentences[i] + sentences[i + 1]
                    if sentence.strip():
                        complete_sentences.append(sentence.strip())
            
            # 如果最后一个元素不是标点，也要添加
            if len(sentences) % 2 == 1 and sentences[-1].strip():
                complete_sentences.append(sentences[-1].strip())
            
            # 合并句子为合适大小的分块
            chunks = []
            current_chunk = ""
            
            for sentence in complete_sentences:
                if len(current_chunk) + len(sentence) + 1 <= self.chunk_size:
                    if current_chunk:
                        current_chunk += ' ' + sentence
                    else:
                        current_chunk = sentence
                else:
                    if current_chunk.strip():
                        chunks.append(current_chunk.strip())
                    current_chunk = sentence
            
            # 添加最后一个分块
            if current_chunk.strip():
                chunks.append(current_chunk.strip())
            
            return chunks if chunks else [text]
            
        except Exception as e:
            logger.error(f"按句子分割失败: {e}")
            raise
    
    async def _split_by_words(self, text: str) -> List[str]:
        """按词分割"""
        try:
            words = text.split()
            chunks = []
            current_chunk = []
            current_length = 0
            
            for word in words:
                word_length = len(word) + 1  # +1 for space
                
                if current_length + word_length <= self.chunk_size:
                    current_chunk.append(word)
                    current_length += word_length
                else:
                    if current_chunk:
                        chunks.append(' '.join(current_chunk))
                    current_chunk = [word]
                    current_length = len(word)
            
            # 添加最后一个分块
            if current_chunk:
                chunks.append(' '.join(current_chunk))
            
            return chunks if chunks else [text]
            
        except Exception as e:
            logger.error(f"按词分割失败: {e}")
            raise
    
    async def _split_by_characters(self, text: str) -> List[str]:
        """按字符分割（最后手段）"""
        try:
            chunks = []
            start = 0
            
            while start < len(text):
                end = start + self.chunk_size
                
                # 尝试在词边界分割
                if end < len(text):
                    # 向后查找空格
                    for i in range(end, min(end + 50, len(text))):
                        if text[i].isspace():
                            end = i
                            break
                
                chunk = text[start:end].strip()
                if chunk:
                    chunks.append(chunk)
                
                start = end - self.chunk_overlap if end > self.chunk_overlap else end
            
            return chunks if chunks else [text]
            
        except Exception as e:
            logger.error(f"按字符分割失败: {e}")
            return [text]
    
    def _validate_chunks(self, chunks: List[str]) -> bool:
        """Check that a strategy's output is usable.

        A chunk list is valid when it is non-empty and no chunk exceeds
        ``max_chunk_size`` by more than 20%. Undersized chunks are tolerated:
        the original min-size check ended in a no-op ``continue`` (dead code),
        so it never rejected anything — it is removed here rather than turned
        into a rejection, preserving behavior.

        Args:
            chunks: Candidate chunk strings.

        Returns:
            True when the chunks may be used, False otherwise (including on
            any internal error).
        """
        try:
            if not chunks:
                return False

            # Allow 20% slack over the hard maximum before rejecting.
            return all(len(chunk) <= self.max_chunk_size * 1.2 for chunk in chunks)

        except Exception:
            return False
    
    async def _post_process_chunks(self, chunks: List[str], 
                                 original_text: str) -> List[Dict[str, Any]]:
        """Attach metadata to each chunk and prepend overlap from its predecessor.

        Positions are approximated by searching for the chunk's first 50
        characters in the original (pre-normalization) text; a failed lookup
        falls back to offset 0. On failure, a minimal dict per chunk is
        returned instead.
        """
        try:
            results: List[Dict[str, Any]] = []

            for index, chunk in enumerate(chunks):
                # Locate the chunk in the original text (best effort).
                probe = chunk[:50] if len(chunk) >= 50 else chunk
                start_pos = original_text.find(probe)
                if start_pos == -1:
                    start_pos = 0
                end_pos = start_pos + len(chunk)

                # Prepend the tail of the previous chunk as shared context.
                overlap_size = 0
                if index > 0 and self.chunk_overlap > 0:
                    previous = chunks[index - 1]
                    tail = previous[-self.chunk_overlap:] if len(previous) > self.chunk_overlap else previous
                    chunk = tail + "\n" + chunk
                    overlap_size = len(tail)

                score = await self._calculate_semantic_score(chunk)

                meta = ChunkMetadata(
                    chunk_id=f"chunk_{index}",
                    start_pos=start_pos,
                    end_pos=end_pos,
                    word_count=len(chunk.split()),
                    sentence_count=len(re.split(r'[.!?。！？]', chunk)),
                    overlap_with_previous=overlap_size,
                    semantic_score=score,
                    language=self.language
                )

                results.append({
                    'content': chunk.strip(),
                    'metadata': meta.__dict__
                })

            return results

        except Exception as e:
            logger.error(f"后处理分块失败: {e}")
            # Degrade to a minimal structure without metadata.
            return [{
                'content': chunk,
                'metadata': {'chunk_id': f'chunk_{i}'}
            } for i, chunk in enumerate(chunks)]
    
    async def _calculate_semantic_score(self, text: str) -> float:
        """计算文本的语义连贯性得分"""
        try:
            if not text.strip():
                return 0.0
            
            words = text.lower().split()
            if not words:
                return 0.0
            
            # 计算非停用词比例
            content_words = [w for w in words if w not in self.stop_words and len(w) > 2]
            content_ratio = len(content_words) / len(words) if words else 0
            
            # 计算词汇多样性
            unique_words = set(words)
            diversity_ratio = len(unique_words) / len(words) if words else 0
            
            # 计算平均词长
            avg_word_length = sum(len(w) for w in words) / len(words) if words else 0
            word_length_score = min(avg_word_length / 6, 1.0)  # 标准化到0-1
            
            # 综合得分
            semantic_score = (
                content_ratio * 0.4 +
                diversity_ratio * 0.3 +
                word_length_score * 0.3
            )
            
            return round(semantic_score, 3)
            
        except Exception as e:
            logger.error(f"计算语义得分失败: {e}")
            return 0.5  # 默认中等得分
    
    def get_chunk_stats(self, chunks: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Summarize chunk sizes and semantic scores.

        Returns an empty dict for empty input or on any internal failure.
        """
        try:
            if not chunks:
                return {}

            sizes = [len(item['content']) for item in chunks]
            scores = [item['metadata'].get('semantic_score', 0) for item in chunks]
            count = len(chunks)

            return {
                'total_chunks': count,
                'avg_chunk_size': sum(sizes) / count,
                'min_chunk_size': min(sizes),
                'max_chunk_size': max(sizes),
                'avg_semantic_score': sum(scores) / count,
                'total_content_length': sum(sizes),
            }

        except Exception as e:
            logger.error(f"获取分块统计失败: {e}")
            return {}