import re
from typing import Any, Dict, List, Optional


def doc_split(text: str) -> List[str]:
    """Split a Chinese legal/regulatory document into individual articles.

    Each article begins with a heading of the form "第...条" written with
    Chinese numerals (e.g. "第一条", "第十二条") and extends up to, but not
    including, the next such heading or the end of the text.

    Args:
        text: Full document text.

    Returns:
        Articles in document order. Any text appearing before the first
        "第...条" heading is not included in the result.
    """
    # Lazy body match stopped by a lookahead for the next article heading
    # (or end of string); re.DOTALL lets '.' span newlines within an article.
    pattern = r'(第[一二三四五六七八九十百千]+条.*?)(?=第[一二三四五六七八九十百千]+条|$)'
    return re.findall(pattern, text, re.DOTALL)


def slice_text_for_rag(
        text: str,
        chunk_size: int = 500,
        overlap: int = 100,
        separator: str = r'[.!?。！？]',
        metadata: Optional[Dict[str, Any]] = None
) -> List[Dict[str, Any]]:
    """Slice text into overlapping chunks suitable for a RAG system.

    The text is split into sentences on ``separator``; each sentence keeps
    the exact delimiter that followed it in the original text. Sentences
    are then packed greedily into chunks of at most ``chunk_size`` tokens,
    with up to ``overlap`` tokens of trailing sentences repeated at the
    start of the next chunk.

    Token counting is a rough estimate: one token per CJK character and
    one per pair of consecutive ASCII letters.

    Args:
        text: Raw text to slice.
        chunk_size: Maximum estimated tokens per chunk.
        overlap: Maximum estimated tokens shared between adjacent chunks.
        separator: Regex pattern matching sentence-ending delimiters.
        metadata: Optional base metadata copied into every chunk's metadata.

    Returns:
        A list of ``{'text': chunk, 'metadata': {...}}`` dicts, where the
        metadata carries ``chunk_id``, ``start_pos`` and ``end_pos``.
        ``start_pos`` is -1 (making ``end_pos`` meaningless) when the chunk
        cannot be located verbatim in the input, e.g. because whitespace
        between sentences was stripped.
    """
    if not text:
        return []

    # Split while KEEPING each delimiter (capturing group), so every
    # sentence is rejoined with the separator that actually followed it.
    # The previous implementation re-searched the whole text and attached
    # the first separator it found to every sentence, corrupting mixed
    # punctuation and dropping the final sentence's delimiter.
    parts = re.split(f'({separator})', text)
    sentences: List[str] = []
    for i in range(0, len(parts), 2):
        body = parts[i].strip()
        if not body:
            continue
        delim = parts[i + 1] if i + 1 < len(parts) else ''
        sentences.append(body + delim)

    if not sentences:
        return []

    def _estimate_tokens(s: str) -> int:
        # ~1 token per CJK character, 1 per two consecutive ASCII letters.
        return len(re.findall(r'[\u4e00-\u9fff]|[a-zA-Z]{2}', s))

    chunks: List[str] = []
    current_chunk: List[str] = []
    current_size = 0

    for sentence in sentences:
        tokens = _estimate_tokens(sentence)

        # Flush the current chunk when adding this sentence would overflow
        # (but never emit an empty chunk).
        if current_size + tokens > chunk_size and current_chunk:
            chunks.append(''.join(current_chunk))

            # Seed the next chunk with trailing sentences, newest first
            # capped, totalling at most `overlap` tokens.
            overlap_text: List[str] = []
            overlap_size = 0
            for s in reversed(current_chunk):
                s_tokens = _estimate_tokens(s)
                if overlap_size + s_tokens > overlap:
                    break
                overlap_text.insert(0, s)
                overlap_size += s_tokens

            current_chunk = overlap_text
            current_size = overlap_size

        current_chunk.append(sentence)
        current_size += tokens

    # Emit whatever is left as the final chunk.
    if current_chunk:
        chunks.append(''.join(current_chunk))

    # Attach metadata; the caller's dict is copied, never mutated.
    result = []
    base_metadata = metadata or {}
    for i, chunk in enumerate(chunks):
        chunk_metadata = base_metadata.copy()
        chunk_metadata['chunk_id'] = i
        # find() may return -1 if stripping made the chunk differ from the
        # source text; callers should treat -1 as "position unknown".
        start = text.find(chunk)
        chunk_metadata['start_pos'] = start
        chunk_metadata['end_pos'] = start + len(chunk)
        result.append({
            'text': chunk,
            'metadata': chunk_metadata
        })

    return result


if __name__ == '__main__':
    # Smoke test: split a sample regulation document into articles.
    source_path = "data/docs/中国联通合作方数据安全管理规范.txt"
    with open(source_path, encoding="utf-8") as fh:
        raw_text = fh.read()
    for article in doc_split(raw_text):
        print(article)
