import re
import jieba
import nltk
import warnings
from src.ui.i18n import _
from src.config import app_config

# Suppress the pkg_resources deprecation warning from jieba
warnings.filterwarnings("ignore", category=UserWarning, module="jieba")

# 尝试加载NLTK的句子分割器，如果失败则记录日志
try:
    # 忽略NLTK的ResourceWarning
    warnings.filterwarnings("ignore", category=UserWarning, module="nltk")
    
    # 注意：在首次运行时，可能需要下载punkt数据包
    # nltk.download('punkt', quiet=True) 
    from nltk.data import load
    # 预加载几种常见语言的分句器
    _SENT_TOKENIZERS = {
        'en': load('tokenizers/punkt/english.pickle'),
        'zh': None,  # 中文使用jieba
        'ja': load('tokenizers/punkt/japanese.pickle'),
        'ko': None,  # 韩文可能也需要特殊处理
        # 可以根据需要添加更多语言
    }
except Exception as e:
    print(f"Warning: Failed to load NLTK sentence tokenizers: {e}")
    _SENT_TOKENIZERS = {}

def smart_segment_text(text, lang_code, max_chars_per_segment=500):
    """
    Split the given text into sentence-aware segments suitable for translation.

    Args:
        text (str): The raw text to segment.
        lang_code (str): Language code of the text (e.g. 'zh', 'en', 'en-US').
        max_chars_per_segment (int): Maximum number of characters per segment.

    Returns:
        list[str]: The list of text segments (the original text as a single
        element when it is empty/whitespace-only).
    """
    if not text.strip():
        return [text]

    # Chinese (any 'zh*' code, matching the original behavior) is split on
    # CJK punctuation rather than with an NLTK tokenizer.
    if lang_code.startswith('zh'):
        return _segment_chinese(text, max_chars_per_segment)

    # Normalize region variants ('en-US', 'ja_JP') to their base code so a
    # preloaded tokenizer is still found; previously such codes silently
    # fell through to the naive length-based split.
    base_lang = re.split(r'[-_]', lang_code)[0].lower()
    tokenizer = _SENT_TOKENIZERS.get(base_lang)
    if tokenizer:
        try:
            nltk_sentences = tokenizer.tokenize(text)
            return _resegment_by_length(nltk_sentences, max_chars_per_segment)
        except Exception as e:
            # Tokenization failed at runtime: degrade to length-based split.
            print(f"Warning: NLTK segmentation failed for {lang_code}, falling back to length-based split: {e}")
            return _split_by_length(text, max_chars_per_segment)

    # No tokenizer available for this language: plain length-based split.
    return _split_by_length(text, max_chars_per_segment)

def _segment_chinese(text, max_chars):
    """Split Chinese text into sentences and regroup them by length.

    Sentences are cut right after common CJK/ASCII end-of-sentence
    punctuation (the lookbehind keeps each delimiter attached to its
    sentence), then recombined into segments no longer than *max_chars*.
    Note: despite the module importing jieba, this helper is purely
    punctuation/regex based.
    """
    # Cut after sentence-ending punctuation; delimiters stay with the text.
    raw_sentences = re.split(r'(?<=[。！？!?;；])', text)
    # Merge consecutive short sentences up to the length budget.
    return _resegment_by_length(raw_sentences, max_chars)

def _resegment_by_length(sentences, max_chars):
    """将已分割的句子列表按照最大长度重新组合"""
    result = []
    current_segment = ""
    
    for sentence in sentences:
        sentence = sentence.strip()
        if not sentence:
            continue
            
        # 如果单个句子就很长，则强制分割
        if len(sentence) > max_chars:
            if current_segment:
                result.append(current_segment.strip())
                current_segment = ""
            # 强制分割长句子
            result.extend(_split_by_length(sentence, max_chars))
            continue
            
        # 检查加入当前句子是否会超限
        test_segment = (current_segment + " " + sentence).strip()
        if len(test_segment) <= max_chars:
            current_segment = test_segment
        else:
            # 如果会超限，则先保存当前段，再开始新段
            if current_segment:
                result.append(current_segment.strip())
            current_segment = sentence
            
    # 保存最后一段
    if current_segment:
        result.append(current_segment.strip())
        
    return result if result else [text] # 防止空结果

def _split_by_length(text, max_chars):
    """简单的基于长度的文本分割"""
    if len(text) <= max_chars:
        return [text]
    
    segments = []
    start = 0
    while start < len(text):
        end = min(start + max_chars, len(text))
        # 尽量在空格或标点后断开，避免切断单词
        # 这是一个简化的逻辑，可以进一步优化
        if end < len(text):
            # 向后查找合适的断点
            for i in range(end, start, -1):
                if text[i] in ' .,;:!?，。；：！？\n':
                    end = i + 1
                    break
        segment = text[start:end].strip()
        if segment: # 避免添加空字符串
            segments.append(segment)
        start = end
        
    return segments if segments else [text]