import logging
import re

from rag.nlp import rag_tokenizer
from rag.nlp import tokenize


def chunk(filename, binary, tenant_id, lang, callback=None, **kwargs):
    """Build retrieval chunks from an audio file.

    NOTE(review): speech-to-text transcription is currently disabled (see the
    commented-out LLMBundle call below), so the transcript is always empty and
    only filename-derived fields are tokenized into the document.

    Args:
        filename: Audio file name; its extension is stripped to form the title.
        binary: Raw audio bytes (unused while transcription is disabled).
        tenant_id: Tenant identifier (unused while transcription is disabled).
        lang: Language name; "english" (case-insensitive) selects English
            tokenization.
        callback: Optional progress callback (currently unused).
        **kwargs: Ignored; accepted for interface compatibility with the
            other chunker implementations.

    Returns:
        A single-element list containing the chunk document, or an empty
        list if tokenization fails.
    """
    doc = {
        "docnm_kwd": filename,
        # Drop the trailing file extension before tokenizing the title.
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename)),
    }
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])

    # is it English
    eng = lang.lower() == "english"  # is_english(sections)
    try:
        # Speech-to-text model invocation (disabled for now):
        # seq2txt_mdl = LLMBundle(tenant_id, LLMType.SPEECH2TEXT, lang=lang)
        # ans = seq2txt_mdl.transcription(binary)
        ans = ""
        tokenize(doc, ans, eng)
        return [doc]
    except Exception:
        # Best effort: record the failure (with traceback) and return no
        # chunks instead of propagating the error to the caller.
        logging.exception("parse audio failed: %s", filename)

    return []
