from typing import List
import re

class SemanticSplitter:
    """Semantic text splitter that chunks text while preserving line formatting."""

    # Fallback chunk size (characters) when config does not provide one.
    # Kept at the historical hard-coded value for backward compatibility.
    DEFAULT_MAX_CHUNK_SIZE = 300000

    def __init__(self, config: dict):
        """Initialize the splitter.

        Args:
            config: Splitter settings. Recognized key:
                ``max_chunk_size`` (int): maximum characters per chunk;
                defaults to 300,000 when absent.
        """
        self.config = config

    def split_with_format(self, text: str) -> List[str]:
        """Split text into chunks while preserving formatting.

        Consecutive blank lines are first collapsed to a single blank line,
        then lines are packed into chunks of at most ``max_chunk_size``
        characters. A single line longer than the cap still becomes its own
        chunk (it is never broken mid-line).

        Args:
            text: Input text.

        Returns:
            List[str]: Chunks of the cleaned text; every kept line ends with
            a trailing newline. Empty input yields an empty list.
        """
        # 1. Collapse extra blank lines while keeping basic formatting.
        text = self._clean_empty_lines(text)

        # 2. Pack lines into chunks. The cap was previously hard-coded;
        # it is now configurable, with the old value as the default.
        # `or {}` tolerates callers that passed config=None (which worked
        # before because config was never read).
        max_chunk_size = (self.config or {}).get(
            "max_chunk_size", self.DEFAULT_MAX_CHUNK_SIZE
        )

        chunks: List[str] = []
        current_chunk = ""

        for line in text.splitlines():
            # Start a new chunk when appending this line would exceed the cap.
            if len(current_chunk) + len(line) > max_chunk_size:
                if current_chunk:
                    chunks.append(current_chunk)
                current_chunk = line + "\n"
            else:
                current_chunk += line + "\n"

        # Flush the trailing chunk, if any.
        if current_chunk:
            chunks.append(current_chunk)

        return chunks

    def _clean_empty_lines(self, text: str) -> str:
        """Collapse runs of blank lines to a single blank line.

        Non-blank lines are always kept; a blank line is kept only when the
        preceding line was non-blank. (An earlier version also tested each
        line against a Markdown-marker regex, but that test was dead code:
        non-blank lines were already kept unconditionally, and a blank line
        can never match a pattern requiring visible characters.)
        """
        cleaned_lines: List[str] = []
        prev_empty = False

        for line in text.splitlines():
            current_empty = not line.strip()
            if not current_empty or not prev_empty:
                cleaned_lines.append(line)
            prev_empty = current_empty

        return '\n'.join(cleaned_lines)

    def split(self, text: str) -> List[str]:
        """Backward-compatible alias for :meth:`split_with_format`."""
        return self.split_with_format(text)