from crazy_functions.ipc_fns.mp import run_in_subprocess_with_timeout
import spacy
from typing import List
from crazy_functions.protect_math import MathFormulaProtector
import os
import datetime

# Temp directory used when dumping spaCy split artifacts; None means use
# `<cwd>/temp` (the project-root temp directory).
_current_book_temp_dir = None

# Singleton protector used to mask formulas/images with placeholders before splitting.
PROTECTOR = MathFormulaProtector()

# spaCy-based sentence segmentation
# ---------------------------------------------------------------
# spaCy's sentencizer recognises sentence boundaries (abbreviations, digits
# adjacent to periods, ...) far more reliably than plain character
# replacement. Pipelines are created lazily per language and cached. If spaCy
# initialisation fails, the code gracefully falls back to the original
# character-replace strategy, so the overall text-splitting workflow keeps
# functioning.
# Most recently used spaCy pipeline (None until first use).
_spacy_nlp = None

# Cache of per-language pipelines so each one is only built once.
_spacy_nlp_cache = {}
# Global list collecting language-detection messages for the chat window (chatbot).
_CHATBOT_LOG = []
def get_language_detection_log():
    """Return a snapshot of the language-detection messages in _CHATBOT_LOG.

    The UI layer calls this to fetch messages and append them to the chatbot
    dialog.
    """
    # Shallow copy so callers cannot mutate the module-level log.
    return _CHATBOT_LOG[:]
# Mapping from ISO 639-1 language code to a human-readable name (common languages).
_LANG_NAME_MAP = {
    "en": "English",
    "zh": "中文",
    "de": "Deutsch",
    "fr": "Français",
    "es": "Español",
    "ru": "Русский",
    "ja": "日本語",
    "ko": "한국어",
    "it": "Italiano",
    "pt": "Português",
    "nl": "Nederlands",
    "sv": "Svenska",
    "pl": "Polski",
    "tr": "Türkçe",
    "cs": "Čeština",
    "da": "Dansk",
    "fi": "Suomi",
    "hu": "Magyar",
    "no": "Norsk",
    "ro": "Română",
    "el": "Ελληνικά",
    "he": "עברית",
    "ar": "العربية",
    "hi": "हिन्दी",
    "th": "ไทย",
    "vi": "Tiếng Việt",
    # Any other language falls back to the code itself (callers use .get(code, code)).
}

def _init_spacy_nlp(lang_code: str):
    """Return a cached minimal spaCy pipeline (blank model + sentencizer) for
    ``lang_code``, creating it on first use.

    Returns None if pipeline creation fails; the failure is cached too, so
    callers fall back without retrying on every call. (No ``global`` statement
    is needed: the cache dict is mutated in place, never rebound.)
    """
    if lang_code in _spacy_nlp_cache:
        return _spacy_nlp_cache[lang_code]

    try:
        nlp = spacy.blank(lang_code)
        nlp.add_pipe("sentencizer")
    except Exception as e:
        print(f"sentencizer for language '{lang_code}' 初始化失败: {e}")
        nlp = None
    # Single store point covers both the success and the failure case.
    _spacy_nlp_cache[lang_code] = nlp
    return nlp
def _split_sentences_spacy(txt: str) -> List[str]:
    """Split ``txt`` into sentences with a spaCy pipeline chosen by detected
    language; if no pipeline is available, return the stripped text as a
    single "sentence" (or an empty list for blank input).

    BUGFIX: the detection-logging block was copy-pasted three times, printing
    the message three times and appending it to _CHATBOT_LOG twice per call.
    It now runs exactly once.
    """
    # 1. Detect the language from the first 500 chars (defaults to English on failure).
    try:
        from langdetect import detect
        lang = detect(txt[:500])
        # Surface the detected language and record it for the chat UI.
        lang_name = _LANG_NAME_MAP.get(lang, lang)
        print(f"检测语言: {lang_name} ({lang})")
        _CHATBOT_LOG.append(f"检测语言: {lang_name} ({lang})")
    except Exception:
        lang = "en"

    # 2. Obtain the sentencizer pipeline for the detected language.
    global _spacy_nlp
    _spacy_nlp = _init_spacy_nlp(lang)

    # 3. No pipeline available -> fall back to the whole text as one sentence.
    if _spacy_nlp is None:
        cleaned = txt.strip()
        return [cleaned] if cleaned else []

    doc = _spacy_nlp(txt)
    # Drop sentences that are only whitespace.
    return [sent.text.strip() for sent in doc.sents if sent.text.strip()]

# Format-preserving spaCy sentence split (keeps the original text intact even
# when spaCy initialisation fails).
def _split_sentences_spacy_preserve(txt: str) -> List[str]:
    """Split sentences with spaCy while preserving original formatting.

    Segments are cut directly out of the original string by character offset,
    and each segment carries all separators (newlines, spaces, ...) up to the
    start of the next sentence — so concatenating the returned list
    reconstructs ``txt`` exactly.
    """
    # Language detection + pipeline retrieval (same logic as _split_sentences_spacy).
    try:
        from langdetect import detect
        lang = detect(txt[:500])
        # Surface the detected language in the console output.
        lang_name = _LANG_NAME_MAP.get(lang, lang)
        print(f"检测语言: {lang_name} ({lang})")
    except Exception:
        lang = "en"

    global _spacy_nlp
    _spacy_nlp = _init_spacy_nlp(lang)

    if _spacy_nlp is None:
        # Fallback: return the original text untouched, keeping every
        # newline/space exactly as-is.
        return [txt] if txt else []

    doc = _spacy_nlp(txt)
    spans = list(doc.sents)
    result: List[str] = []
    for i, span in enumerate(spans):
        # Extend each sentence through the separators that follow it, up to
        # the next sentence's start (or the end of the text for the last one).
        next_start = spans[i + 1].start_char if i + 1 < len(spans) else len(txt)
        result.append(txt[span.start_char:next_start])
    return result

def force_breakdown(txt, limit, get_token_fn):
    """Binary-search the longest prefix of ``txt`` whose token count is
    strictly below ``limit`` and return ``(prefix, remainder)``.
    """
    lo, hi = 0, len(txt)
    split_at = 0
    while lo <= hi:
        candidate = (lo + hi) // 2
        if get_token_fn(txt[:candidate]) < limit:
            # Prefix fits: remember it and look for a longer one.
            split_at = candidate
            lo = candidate + 1
        else:
            # Prefix too large: shrink the search window.
            hi = candidate - 1
    return txt[:split_at], txt[split_at:]


def maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage):
    """Speed trick: keep the working text at a bounded size.

    When the working text exceeds ``_max`` characters, the overflow is parked
    in ``remain_txt_to_cut_storage``; when it shrinks below ``_min``, the
    stored text is pulled back in. Returns the updated
    ``(remain_txt_to_cut, remain_txt_to_cut_storage)`` pair.
    """
    lower_bound = int(5e4)
    upper_bound = int(1e5)
    # Refill the working text from storage when it gets too short.
    if len(remain_txt_to_cut) < lower_bound and remain_txt_to_cut_storage:
        remain_txt_to_cut += remain_txt_to_cut_storage
        remain_txt_to_cut_storage = ""
    # Park any overflow beyond the upper bound back into storage.
    if len(remain_txt_to_cut) > upper_bound:
        overflow = remain_txt_to_cut[upper_bound:]
        remain_txt_to_cut_storage = overflow + remain_txt_to_cut_storage
        remain_txt_to_cut = remain_txt_to_cut[:upper_bound]
    return remain_txt_to_cut, remain_txt_to_cut_storage


def cut(limit, get_token_fn, txt_tocut, must_break_at_empty_line, break_anyway=False, cut_strategy=""):
    """Split ``txt_tocut`` into segments whose token counts fit within ``limit``.

    Args:
        limit: maximum token count per segment.
        get_token_fn: callable mapping a string to its token count.
        txt_tocut: the text to split.
        must_break_at_empty_line: only allow splits on empty lines
            (i.e. at blank-line boundaries).
        break_anyway: if no line-based split point fits, force a
            character-level split via force_breakdown instead of raising.
        cut_strategy: label echoed in the progress messages.

    Returns:
        list of segments, in order.

    Raises:
        RuntimeError: a single line exceeds the limit and break_anyway is False.
    """
    res = []
    total_len = len(txt_tocut)
    fin_len = 0  # characters already emitted into res, for progress reporting
    remain_txt_to_cut = txt_tocut
    remain_txt_to_cut_storage = ""
    # Speed trick: keep at most _max characters in the working buffer and park
    # the rest in remain_txt_to_cut_storage (see maintain_storage).
    remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage)

    while True:
        if get_token_fn(remain_txt_to_cut) <= limit:
            # Remaining text already fits within the limit -- no more cutting needed.
            res.append(remain_txt_to_cut); fin_len+=len(remain_txt_to_cut)
            break
        else:
            # Remaining text exceeds the limit -- look for a line-based split point.
            lines = remain_txt_to_cut.split('\n')

            # Estimate a split line index proportionally to the token budget.
            estimated_line_cut = limit / get_token_fn(remain_txt_to_cut) * len(lines)
            estimated_line_cut = int(estimated_line_cut)

            # Walk backwards from the estimate to find a feasible split offset (cnt).
            cnt = 0
            for cnt in reversed(range(estimated_line_cut)):
                if must_break_at_empty_line:
                    # Only empty lines (blank-line boundaries) may serve as split points.
                    if lines[cnt] != "":
                        continue
                prev = "\n".join(lines[:cnt])
                post = "\n".join(lines[cnt:])
                if get_token_fn(prev) < limit:
                    break
            # NOTE(review): cnt == 0 conflates "loop found a split at line 0"
            # (prev would be empty anyway) with "no feasible split found";
            # both fall into the branch below.

            if cnt == 0:
                # No usable line-based split point was found.
                if break_anyway:
                    # Forced character-level split is permitted.
                    prev, post = force_breakdown(remain_txt_to_cut, limit, get_token_fn)
                else:
                    # Not permitted -> report the oversized line.
                    raise RuntimeError(f"存在一行极长的文本！{remain_txt_to_cut}")

            # Emit the finished segment.
            res.append(prev); fin_len+=len(prev)
            # Prepare the next iteration.
            remain_txt_to_cut = post
            remain_txt_to_cut, remain_txt_to_cut_storage = maintain_storage(remain_txt_to_cut, remain_txt_to_cut_storage)
            process = fin_len/total_len
            print(f'正在文本切分 {int(process*100)}% {cut_strategy}')
            if len(remain_txt_to_cut.strip()) == 0:
                break
    return res


def breakdown_text_to_satisfy_token_limit_(txt, limit, llm_model="gpt-3.5-turbo", prefix=""):
    """Split ``txt`` into segments that each satisfy the token ``limit``,
    trying a cascade of strategies (spaCy sentences -> Chinese full stop ->
    forced character split), each guarded by a per-strategy timeout.

    Args:
        txt: text to split.
        limit: maximum token count per segment.
        llm_model: model key used to look up the tokenizer in model_info.
        prefix: optional filename prefix for the debug dumps written to temp.

    Returns:
        (segments, strategy_name): list of segments and the name of the
        strategy that produced them. BUGFIX: previously the function fell off
        the end and implicitly returned None when the final forced-split
        strategy timed out, crashing callers that unpack the tuple; it now
        always returns ``([], strategy_name)`` in that case.
    """
    import concurrent.futures
    from request_llms.bridge_all import model_info
    enc = model_info[llm_model]['tokenizer']
    def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))

    def execute_with_timeout(func, timeout=40, strategy_name=""):
        # Run `func` in a worker thread; return None on timeout so the caller
        # can fall through to the next strategy.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future = executor.submit(func)
            try:
                return future.result(timeout=timeout)
            except concurrent.futures.TimeoutError:
                print(f"{strategy_name}执行超时（{timeout}秒），转入下一层策略...")
                return None

    # NOTE: the former first layer (split at blank lines) and second layer
    # (split at single newlines) are intentionally disabled; processing starts
    # directly with the spaCy sentence-based strategy below.

    # Third layer: spaCy sentence segmentation.
    strategy_name = "第三层切分策略：英文句号（spaCy）"
    print(f"尝试{strategy_name}...")

    def process_spacy_eng():
        """Split via spaCy sentences, then greedily merge adjacent sentences so
        each chunk's token count approaches ``limit``.

        Formulas/images are masked with placeholders before splitting and
        restored afterwards, so they are never truncated across segments.
        """
        try:
            # 1. Mask formulas/images with placeholders.
            protected_txt, formula_mapping = PROTECTOR.protect(txt)

            # 2. Dump the fully masked text into the temp directory for
            #    inspection (filename carries prefix + timestamp).
            _temp_dir = _current_book_temp_dir if _current_book_temp_dir else os.path.join(os.getcwd(), "temp")
            os.makedirs(_temp_dir, exist_ok=True)
            _timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
            prefix_part = f"{prefix}" if prefix else ""
            _masked_path = os.path.join(_temp_dir, f"{prefix_part}spacy切分策略的整本书掩码{_timestamp}.md")
            with open(_masked_path, "w", encoding="utf-8") as _f:
                _f.write(protected_txt)

            # 3. Format-preserving sentence split of the masked text.
            sentences = _split_sentences_spacy_preserve(protected_txt)

            # 4. Greedily merge consecutive sentences up to the token limit.
            #    Segments are raw substrings (separators included), so plain
            #    concatenation needs no extra newlines.
            segs: List[str] = []
            current_chunk = ""
            for s in sentences:
                if not current_chunk:
                    current_chunk = s
                    continue
                candidate = current_chunk + s
                if get_token_fn(candidate) <= limit:
                    current_chunk = candidate
                else:
                    # Chunk is near the limit: cut it down and flush to results.
                    segs.extend(
                        cut(
                            limit,
                            get_token_fn,
                            current_chunk,
                            must_break_at_empty_line=False,
                            cut_strategy="【spaCy 合并句子】",
                        )
                    )
                    current_chunk = s
            # Flush the trailing chunk.
            if current_chunk:
                segs.extend(
                    cut(
                        limit,
                        get_token_fn,
                        current_chunk,
                        must_break_at_empty_line=False,
                        cut_strategy="【spaCy 合并句子】",
                    )
                )

            # 5. Restore the masked formulas/images in each segment.
            restored_segs = [PROTECTOR.restore(seg, formula_mapping) for seg in segs]

            # 6. Dump the merged slices (still masked) to a single file, with a
            #    heading marking each slice's order.
            _merged_path = os.path.join(_temp_dir, f"spacy切分策略的合并后切片{_timestamp}.md")
            with open(_merged_path, "w", encoding="utf-8") as _mf:
                for _idx, _seg in enumerate(segs, start=1):
                    _mf.write(f"\n# 切片{_idx}\n\n")
                    _mf.write(_seg)
                    _mf.write("\n\n")
            return restored_segs
        except Exception as e:
            # Fall back to the character-replace splitting strategy.
            print(f"spaCy 切分出错，回退到字符替换方式: {e}")
            res = cut(
                limit,
                get_token_fn,
                txt.replace('.', '。\n'),
                must_break_at_empty_line=False,
                cut_strategy=f"【{strategy_name}】",
            )
            return [r.replace('。\n', '.') for r in res]

    result = execute_with_timeout(
        process_spacy_eng,
        timeout=20,
        strategy_name=strategy_name,
    )
    if result is not None:
        return result, strategy_name

    # Fourth layer: split at Chinese full stops (。).
    try:
        strategy_name = "第四层切分策略：中文句号"
        print(f"尝试{strategy_name}...")
        def process_cn_period():
            res = cut(
                limit,
                get_token_fn,
                txt.replace('。', '。。\n'),
                must_break_at_empty_line=False,
                cut_strategy=f"【{strategy_name}】",
            )
            return [r.replace('。。\n', '。') for r in res]

        result = execute_with_timeout(
            process_cn_period,
            timeout=30,
            strategy_name=strategy_name,
        )
        if result is not None:
            return result, strategy_name
    except RuntimeError as e:
        print(f"第四层切分策略失败：{str(e)}")

    # Fifth layer: forced character-level split.
    strategy_name = "第五层切分策略：强制切分"
    print(f"所有切分策略均失败或超时，使用{strategy_name}...")
    try:
        result = execute_with_timeout(
            lambda: cut(
                limit,
                get_token_fn,
                txt,
                must_break_at_empty_line=False,
                break_anyway=True,
                cut_strategy=f"【{strategy_name}】",
            ),
            timeout=25,
            strategy_name=strategy_name,
        )
        if result is not None:
            return result, strategy_name
    except RuntimeError as e:
        print(f"警告：强制切分也失败了：{str(e)}。将跳过当前文本处理流程。")
    # Every strategy failed or timed out: still return the expected tuple shape.
    return [], strategy_name

# Public wrapper: run the splitter in a watchdog subprocess with a hard
# 60-second timeout. Returns (segments, strategy_name), mirroring
# breakdown_text_to_satisfy_token_limit_.
def breakdown_text_to_satisfy_token_limit(txt, limit, llm_model="gpt-3.5-turbo", prefix=""):
    guarded = run_in_subprocess_with_timeout(breakdown_text_to_satisfy_token_limit_, timeout=60)
    return guarded(txt, limit, llm_model, prefix)

if __name__ == '__main__':
    # Manual smoke test: read a PDF, inflate the text, and run the splitter.
    from crazy_functions.crazy_utils import read_and_clean_pdf_text
    file_content, page_one = read_and_clean_pdf_text("build/assets/at.pdf")

    from request_llms.bridge_all import model_info
    # Double the content 5 times (x32) to stress-test the splitter.
    for i in range(5):
        file_content += file_content

    print(len(file_content))
    TOKEN_LIMIT_PER_FRAGMENT = 2500
    res = breakdown_text_to_satisfy_token_limit(file_content, TOKEN_LIMIT_PER_FRAGMENT)

