import os
import re
from typing import List
from .api_client import APIClient
from utils.logger import logger

class TextProcessor:
    """Split Chinese text into size-bounded chunks and merge processed chunks back.

    ``split_text`` cuts text into chunks no longer than ``max_chunk`` characters,
    carrying a small sentence overlap across chunk boundaries for context;
    ``merge_text`` detects and strips that overlap when reassembling.
    """

    def __init__(self, config: dict, api_client: APIClient):
        # Chunk size bounds (in characters) from the ``processing`` config section.
        self.min_chunk = config['processing'].get('min_chunk_size', 500)   # minimum chunk size (currently informational)
        self.max_chunk = config['processing'].get('max_chunk_size', 1500)  # maximum chunk size
        # Chinese sentence terminators. BUGFIX: the original pattern had a stray
        # ']' after the character class, which required the punctuation to be
        # followed by a literal ']' and so almost never matched; it was also
        # inconsistent with the set used in _split_sentences. One shared,
        # compiled pattern is now used everywhere.
        self.sentence_end = re.compile(r'([。！？；：’”"』】])')
        self.api_client = api_client

    def split_text(self, text: str) -> List[str]:
        """Split *text* into chunks of at most ``max_chunk`` characters.

        The text is split into paragraphs (on blank lines), then into
        sentences. Sentences accumulate until the limit would be exceeded;
        a split point is then chosen and up to two sentences are carried
        over into the next chunk so context survives the boundary.

        Returns an empty list for blank input.
        """
        if not text.strip():
            logger.error("输入文本为空")
            return []

        chunks: List[str] = []
        current_chunk: List[str] = []
        current_len = 0

        # Natural paragraphs first; empties are filtered here, so no further
        # per-paragraph emptiness check is needed.
        paragraphs = [p.strip() for p in text.split('\n\n') if p.strip()]
        logger.debug(f"原始段落数: {len(paragraphs)}")

        for para in paragraphs:
            # Sentence-level split within the paragraph.
            sentences = self._split_sentences(para)
            logger.trace(f"段落分解 | 原长度: {len(para)} | 句子数: {len(sentences)}")

            if not sentences:  # fall back to the whole paragraph when unsplittable
                sentences = [para]

            for sent in sentences:
                sent_len = len(sent)

                # Flush a chunk when adding this sentence would exceed the limit.
                if current_len + sent_len > self.max_chunk:
                    split_pos = self._find_split_point(current_chunk)
                    logger.debug(f"触发分块 | 当前长度: {current_len} | 最大允许: {self.max_chunk}")

                    chunks.append(''.join(current_chunk[:split_pos]))
                    # Keep up to two sentences of overlap for continuity.
                    # BUGFIX: clamp at 0 — split_pos can be 1, and the old
                    # ``split_pos - 2`` wrapped to index -1, keeping the
                    # wrong slice (and duplicating content).
                    current_chunk = current_chunk[max(split_pos - 2, 0):]
                    current_len = sum(len(s) for s in current_chunk)

                current_chunk.append(sent)
                current_len += sent_len

            # Re-insert the paragraph separator that splitting removed.
            if current_chunk:
                current_chunk.append('\n\n')
                current_len += 2

        # Emit whatever remains after the last paragraph.
        if current_chunk:
            final_chunk = ''.join(current_chunk).strip()
            if final_chunk:  # drop whitespace-only leftovers
                chunks.append(final_chunk)
        logger.info(f"最终分块数: {len(chunks)} | 各块长度: {[len(c) for c in chunks]}")
        return chunks

    def _split_sentences(self, text: str) -> List[str]:
        """Split *text* on Chinese sentence terminators.

        Terminators stay attached to their sentence; fragments of 10 chars
        or fewer are merged into the following sentence.
        """
        # The capturing group in self.sentence_end keeps the terminator in the
        # split result: [text, term, text, term, ..., tail].
        splits = self.sentence_end.split(text)
        sentences: List[str] = []
        buffer = ''

        # Re-attach each terminator to the text that precedes it.
        for i in range(0, len(splits) - 1, 2):
            buffer += splits[i] + splits[i + 1]
            if len(buffer.strip()) > 10:  # avoid emitting tiny fragments
                sentences.append(buffer.strip())
                buffer = ''

        # BUGFIX: text after the last terminator (the odd tail element of the
        # split) was previously dropped whenever the buffer had just been
        # flushed — silent text loss.
        if len(splits) % 2 == 1 and splits[-1]:
            buffer += splits[-1]

        if buffer.strip():
            sentences.append(buffer.strip())
        return sentences

    def _find_split_point(self, sentences: List[str], look_back: int = 3) -> int:
        """Return the exclusive index at which *sentences* should be cut.

        Preference order: a paragraph boundary within the last ``look_back``
        entries, then the latest sentence containing terminal punctuation,
        then a forced split that leaves the last two sentences for the next
        chunk.
        """
        # Prefer a recent paragraph boundary. BUGFIX: clamp the window start
        # at 0 — a short list made the range start negative, so negative
        # indices re-checked tail elements.
        for i in reversed(range(max(len(sentences) - look_back, 0), len(sentences))):
            if sentences[i].endswith('\n\n'):
                return i + 1

        # Otherwise split after the most recent complete sentence.
        for i in reversed(range(len(sentences))):
            if self.sentence_end.search(sentences[i]):
                return i + 1

        # Forced split: keep at least one sentence, leave up to two behind.
        return max(1, len(sentences) - 2)

    def merge_text(self, chunks: list) -> str:
        """Merge processed chunks, stripping the overlap split_text introduced.

        For each chunk, the longest prefix that is also a suffix of the
        previous chunk's tail is found; overlaps longer than 50 characters
        are removed before joining.
        """
        merged = []
        prev_end = ""
        for chunk in chunks:
            # BUGFIX: os.path.commonprefix compared the *start* of the previous
            # tail with the start of this chunk, so genuine suffix/prefix
            # overlaps were essentially never detected. Search for the longest
            # prefix of ``chunk`` that ends ``prev_end`` instead.
            overlap_len = 0
            for k in range(min(len(prev_end), len(chunk)), 0, -1):
                if prev_end.endswith(chunk[:k]):
                    overlap_len = k
                    break
            if overlap_len > 50:
                merged.append(chunk[overlap_len:])
            else:
                merged.append(chunk)
            prev_end = chunk[-200:]  # remember the tail for the next comparison
        return '\n\n'.join(merged)