import re
from typing import List
from dis_qa.config import get_distilabel_config
import tiktoken

# 按字符长度分块，同时保证不切断句子
def do_chunk_normal(text: str, max_len: int = 1024) -> List[str]:
    """
    Split text into chunks of at most max_len characters without cutting
    sentences in half (Chinese or English sentence terminators).

    A single sentence longer than max_len cannot be kept whole and is
    hard-split into fixed-size slices.

    Args:
        text: The input text to split.
        max_len: Maximum number of characters per chunk.

    Returns:
        List of non-overlapping text chunks (stripped of edge whitespace).
    """
    # Split AFTER sentence-ending punctuation. Using only a lookbehind
    # (instead of also consuming `\s*`) keeps the whitespace attached to
    # the next sentence, so rejoined chunks preserve the original spacing.
    sentences = re.split(r'(?<=[。！？.!?])', text)
    chunks = []
    current_chunk = ""

    for sentence in sentences:
        if len(current_chunk) + len(sentence) <= max_len:
            current_chunk += sentence
        else:
            if current_chunk:
                chunks.append(current_chunk.strip())
            # Oversized sentence: hard-split into max_len slices.
            while len(sentence) > max_len:
                chunks.append(sentence[:max_len])
                sentence = sentence[max_len:]
            current_chunk = sentence

    if current_chunk:
        chunks.append(current_chunk.strip())

    print(f" =========== 生成 {len(chunks)} 个 chunk ===================")
    return chunks


def chunk_with_token(text, max_tokens: int = 1024, overlap: int = 102) -> List[str]:
    """
    Split text into chunks of at most max_tokens tokens, preferring to end
    each chunk on a sentence boundary, with `overlap` tokens shared between
    consecutive chunks.

    Args:
        text: The input text to split.
        max_tokens: Maximum number of tokens per chunk.
        overlap: Number of tokens of overlap between consecutive chunks.

    Returns:
        A list of decoded text chunks.
    """
    enc = tiktoken.get_encoding("cl100k_base")  # general-purpose encoding
    tokens = enc.encode(text)

    chunks = []
    start = 0

    while start < len(tokens):
        end = min(start + max_tokens, len(tokens))

        # Try to back off to the last sentence terminator (Chinese or
        # English) so a chunk does not cut a sentence in half.
        if end < len(tokens):
            snippet = enc.decode(tokens[start:end])
            last_dot = max(snippet.rfind(p) for p in "。！？.!?")
            if last_dot != -1:
                # Re-encoding the truncated snippet approximates the token
                # count up to the boundary; it may differ slightly from the
                # original tokenization of the full text.
                end = start + len(enc.encode(snippet[:last_dot + 1]))

        # Guard: if the back-off collapsed to (or before) the start,
        # fall back to a plain fixed-size chunk.
        if end <= start:
            end = min(len(tokens), start + max_tokens)

        chunks.append(enc.decode(tokens[start:end]))

        if end >= len(tokens):
            break

        # Next chunk starts `overlap` tokens before the current end.
        # If that would not advance (chunk shorter than the overlap),
        # skip the overlap entirely to guarantee forward progress.
        next_start = end - overlap
        start = next_start if next_start > start else end

    print(f" =========== 生成 {len(chunks)} 个 chunk ===================")
    return chunks



def chunk_document(path: str) -> List[str]:
    """
    Read a UTF-8 text file and split it into token-based chunks using the
    configured distilabel chunk size and overlap.

    Args:
        path: Path of the document to read.

    Returns:
        The list of text chunks produced by chunk_with_token.
    """
    # Fetch the config once instead of once per setting.
    config = get_distilabel_config()
    with open(path, "r", encoding="utf-8") as f:
        text = f.read()
    # Chunk outside the `with` block: the file handle is only needed for
    # reading, and this removes the previously unreachable `return []`.
    return chunk_with_token(
        text,
        max_tokens=config.CHUNK_SIZE,
        overlap=config.CHUNK_OVERLAP,
    )