import os
import re
from datetime import datetime
from typing import List, Optional, Any
import logging
import os.path
import json
import cv2
import fitz
import jieba
import numpy as np
import pandas as pd
from langchain_text_splitters import RecursiveCharacterTextSplitter
from tqdm import tqdm
from langchain_core.documents import Document
from config import SUPPORTED_IMAGE_FORMATS, CUSTOM_SEP
from pasers.paser_utils import re_filter
from table_utils import get_md_tables
from pasers.xlsx_parser.xlsx_parse import prepare_excel_table
from collections import defaultdict


def traverse_dir(path, tails, file_info_ls=None):
    """
    Recursively collect files under ``path`` whose extension is in ``tails``.

    :param path: root directory to walk (created if missing)
    :param tails: iterable of lowercase extensions to keep, or None for all
    :param file_info_ls: accumulator list, created on the first call
    :return: [(file_path_1, file_name_1), ..., (file_path_n, file_name_n)]
    Note: Python cannot detect files whose paths are too deep; enable the
    clean_path handling in that situation.
    Reference: https://blog.csdn.net/baidu_34928905/article/details/131935428
    """
    if file_info_ls is None:
        file_info_ls = []
    os.makedirs(path, exist_ok=True)
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            traverse_dir(entry_path, tails, file_info_ls)
            continue
        base_name = os.path.basename(entry_path)
        if base_name.startswith('~$'):
            continue  # skip Office lock/temporary files
        name_parts = base_name.split('.')
        suffix = name_parts[-1]
        if tails is None or suffix.lower() in tails:
            file_info_ls.append((clean_path(entry_path), '.'.join(name_parts[:-1])))
    return file_info_ls


def clean_path(path):
    """
    Normalize a file path for the current OS and work around the Windows
    260-character path limit.

    Args:
        path (str): input path, possibly mixing '/' and '\\' separators or
            containing relative '..' segments.

    Returns:
        str: the cleaned path in the current platform's format; on Windows
        it is prefixed with the long-path marker '\\\\?\\'.
    """
    # Unify both separator styles to the platform default.
    path = path.replace('/', os.sep).replace('\\', os.sep)

    # Windows only: add the long-path prefix unless it is already present.
    if os.sep == '\\' and '\\\\?\\' not in path:
        # Number of '..' segments that must be resolved against the cwd.
        relative_levels = sum(1 for part in path.split(os.sep) if part == '..')

        # Relative paths (no drive letter) are anchored at the cwd.
        cwd = os.getcwd().split(os.sep) if ':' not in path else []

        # Drop the consumed parent levels and prepend the long-path prefix.
        parts = [part for part in path.split(os.sep) if part != '']
        path = '\\\\?\\' + os.sep.join(cwd[:len(cwd) - relative_levels] + parts[relative_levels:])

    return path


def _split_text_with_regex_from_end(
        text: str, separator: str, keep_separator: bool
) -> List[str]:
    # Now that we have the separator, split the text
    if separator:
        if keep_separator:
            # The parentheses in the pattern keep the delimiters in the result.
            _splits = re.split(f"({separator})", text)
            splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
            if len(_splits) % 2 == 1:
                splits += _splits[-1:]
            # splits = [_splits[0]] + splits
        else:
            splits = re.split(separator, text)
    else:
        splits = list(text)
    return [s for s in splits if s != ""]


class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter):
    """Recursive text splitter tuned for Chinese: falls back through
    paragraph breaks, newlines, then CJK/ASCII sentence and clause
    punctuation, merging pieces back up to the configured chunk size."""

    def __init__(
            self,
            separators: Optional[List[str]] = None,
            keep_separator: bool = True,
            is_separator_regex: bool = True,
            **kwargs: Any,
    ) -> None:
        """Create a new TextSplitter.

        :param separators: ordered regex separators, coarsest first; defaults
            to paragraph / newline / sentence / clause punctuation.
        :param keep_separator: keep each separator attached to the chunk that
            precedes it.
        :param is_separator_regex: treat separators as regular expressions.
        """
        super().__init__(keep_separator=keep_separator, **kwargs)
        # Raw strings fix the invalid escape sequences ("\.", "\!", "\s")
        # that the previous plain literals produced (a SyntaxWarning on
        # modern Python); the resulting regex patterns are unchanged.
        self._separators = separators or [
            "\n\n",
            "\n",
            r"。|！|？",
            r"\.\s|\!\s|\?\s",
            r"；|;\s",
            r"，|,\s",
        ]
        self._is_separator_regex = is_separator_regex

    def _split_text(self, text: str, separators: List[str]) -> List[str]:
        """Split incoming text and return chunks, recursing to finer
        separators for pieces still longer than the chunk size."""
        final_chunks = []
        # Pick the first separator that actually occurs in the text; keep the
        # finer separators for the recursive calls below.
        separator = separators[-1]
        new_separators = []
        for i, _s in enumerate(separators):
            _separator = _s if self._is_separator_regex else re.escape(_s)
            if _s == "":
                separator = _s
                break
            if re.search(_separator, text):
                separator = _s
                new_separators = separators[i + 1:]
                break

        _separator = separator if self._is_separator_regex else re.escape(separator)
        splits = _split_text_with_regex_from_end(text, _separator, self._keep_separator)

        # Merge short pieces; recursively re-split pieces that are too long.
        _good_splits = []
        _separator = "" if self._keep_separator else separator
        for s in splits:
            if self._length_function(s) < self._chunk_size:
                _good_splits.append(s)
            else:
                if _good_splits:
                    merged_text = self._merge_splits(_good_splits, _separator)
                    final_chunks.extend(merged_text)
                    _good_splits = []
                if not new_separators:
                    final_chunks.append(s)
                else:
                    other_info = self._split_text(s, new_separators)
                    final_chunks.extend(other_info)
        if _good_splits:
            merged_text = self._merge_splits(_good_splits, _separator)
            final_chunks.extend(merged_text)
        # Collapse runs of blank lines and drop empty chunks.
        return [re.sub(r"\n{2,}", "\n", chunk.strip()) for chunk in final_chunks if chunk.strip() != ""]


def rmb_trans(rmb_amount):
    """
    Convert a Chinese uppercase monetary string into an Arabic number.

    Args:
        rmb_amount (str): uppercase amount, e.g.
            "壹亿贰仟叁佰万肆仟伍佰陆拾元柒角捌分".

    Returns:
        float: the converted amount.
    """
    # Uppercase Chinese digits -> values.
    digit_map = {'零': 0, '壹': 1, '贰': 2, '叁': 3, '肆': 4, '伍': 5, '陆': 6, '柒': 7, '捌': 8, '玖': 9}
    # Unit characters -> multipliers.
    unit_map = {'分': 0.01, '角': 0.1, '元': 1, '拾': 10, '佰': 100, '仟': 1000, '圆': 1}

    def section_value(segment):
        # Sum digit-before-unit products for every unit found in the
        # segment, then add a bare trailing digit (e.g. "贰" in "拾贰").
        total = 0
        for unit, factor in unit_map.items():
            if unit in segment:
                total += digit_map[segment[segment.index(unit) - 1]] * factor
        if segment[-1] in digit_map:
            total += digit_map[segment[-1]]
        return total

    result = 0

    # Hundred-million ("亿") section.
    if '亿' in rmb_amount:
        yi_part = re.match(r'(.+)亿.*', rmb_amount).group(1)
        result += section_value(yi_part) * 100000000
        rmb_amount = re.sub(r'.+亿', '', rmb_amount, count=1)

    # Ten-thousand ("万") section.
    if '万' in rmb_amount:
        wan_part = re.match(r'(.+)万.*', rmb_amount).group(1)
        result += section_value(wan_part) * 10000
        rmb_amount = re.sub(r'.+万', '', rmb_amount, count=1)

    # "元" section and below; only units directly preceded by a digit count.
    yuan_total = 0
    for unit, factor in unit_map.items():
        if unit in rmb_amount:
            prev_char = rmb_amount[rmb_amount.index(unit) - 1]
            if prev_char in digit_map:
                yuan_total += digit_map[prev_char] * factor
    result += yuan_total

    return result


def clean_content(content):
    """Lowercase ``content`` and strip the characters -_/:#=?@ from it."""
    lowered = content.lower()
    return lowered.translate(str.maketrans('', '', '-_/:#=?@'))


def get_custom_features(content_ls, to_expr=False):
    """
    Extract alphanumeric tokens and Chinese uppercase monetary amounts from
    each string, producing a simplified feature set per string.

    Args:
        content_ls (list of str): input strings.
        to_expr (bool): when True, return a single TEXT_MATCH expression over
            the union of all features instead of the per-string list.

    Returns:
        list of str | str: per-string features joined by newlines ('' when a
        string has none), or a TEXT_MATCH expression when ``to_expr``.
    """
    res = []
    exclude_words = {'metaindex', 'unnamed'}
    # Matches runs of uppercase Chinese digits/units (monetary amounts).
    pattern = re.compile(
        r"[壹贰叁肆伍陆柒捌玖拾佰仟][壹贰叁肆伍陆柒捌玖拾佰仟元角万分百整零]+[壹贰叁肆伍陆柒捌玖拾佰仟元角万分百整零]")

    for content in content_ls:
        # Convert each uppercase-Chinese amount to its numeric form.
        money_ch_set = {str(rmb_trans(m)) for m in pattern.findall(content)}

        # Strip noise characters, then collect alphanumeric tokens.
        cleaned_content = clean_content(content)
        num_letters = {tok for tok in re.findall(r'[a-z0-9]+', cleaned_content) if tok not in exclude_words}

        # Union of money features and alphanumeric tokens.
        feature_set = num_letters | money_ch_set
        res.append('\n'.join(feature_set) if feature_set else '')

    if to_expr:
        # De-duplicate at the individual-token level. The previous check
        # (`i not in e_ls`) compared the newline-joined string against single
        # tokens, so entries with several features were never filtered out.
        e_ls = []
        seen = set()
        for entry in res:
            if not entry:
                continue
            for token in entry.split('\n'):
                if token not in seen:
                    seen.add(token)
                    e_ls.append(token)
        return f"TEXT_MATCH(addedValue,'{' '.join(e_ls)}')"
    return res


def log_message(message, level=logging.INFO):
    """Emit ``message`` through the logging module, prefixed with a timestamp
    and the multimodal-retrieval image tag."""
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    logging.log(level, f"{now} 多模态检索镜像 {message}")


def split_content_with_media(content, table_info, image_info, spliter, split_size):
    """
    Split content that may embed <table:...> / <image:...> placeholder tags.

    :param content: text to chunk, containing the body plus special tags
    :param table_info: dict of table payloads keyed by table identifier
    :param image_info: dict of image payloads (content, path) keyed by id
    :param spliter: text splitter object exposing ``split_text``
    :param split_size: maximum size of a single chunk
    :return: (split_ls, img_path_ls) — chunk list and, for each chunk, the
             originating image path ('' when the chunk has no image)
    """
    # Fast path: no embedded media, just split the plain text.
    if not (table_info or image_info):
        plain_chunks = filter_texts_and_images(spliter.split_text(content))
        return plain_chunks, [''] * len(plain_chunks)

    split_ls, img_path_ls = [], []

    # Split the content on <table:...> / <image:...> tags.
    for raw_block in re.split(r'(<[tableimg]+:[^>]+>)', content):
        block = raw_block.strip()
        if not block:
            continue  # skip empty segments

        table_key = re.findall(r'<table:([^>]+)>', block)
        image_key = re.findall(r'<image:([^>]+)>', block)

        if table_key:
            # Table placeholder: render the table as markdown sub-tables.
            table_dic = table_info.get(table_key[0])
            if not table_dic:
                continue
            sub_tables, exceed_cell_ls = get_md_tables(table_dic, split_size)
            split_ls += sub_tables
            img_path_ls += [''] * len(sub_tables)
            # Cells too long for a table chunk are re-split as plain text.
            if exceed_cell_ls:
                exceed_text_ls = spliter.split_text('\n'.join(exceed_cell_ls))
                split_ls += exceed_text_ls
                img_path_ls += [''] * len(exceed_text_ls)
        elif image_key:
            # Image placeholder: split its OCR text and remember the path.
            image_data = image_info.get(image_key[0])
            if image_data['content']:
                ocr_text = re_filter(' '.join(line[1][0] for line in image_data['content'])).strip()
                text_ls = spliter.split_text(ocr_text)
                split_ls += text_ls
                img_path_ls += [image_data['path']] * len(text_ls)
        else:
            # Plain text segment.
            text_ls = filter_texts_and_images(spliter.split_text(block))
            split_ls += text_ls
            img_path_ls += [''] * len(text_ls)

    return split_ls, img_path_ls


def filter_texts_and_images(text_list):
    """
    Drop text entries consisting only of short alphanumeric/space runs.

    Each entry is tested by removing every 1-3 character run of
    [0-9a-zA-Z ]; entries left empty by that removal (i.e. containing
    nothing else) are discarded, all other entries are kept unchanged.

    Args:
        text_list (list of str): text entries to filter.

    Returns:
        list of str: the entries that still contain other content.
        (The previous docstring claimed a tuple of two lists; only one
        list was ever returned.)
    """
    pattern = re.compile(r'[0-9a-zA-Z ]{1,3}')
    # Keep the original text whenever something survives the removal.
    return [text for text in text_list if pattern.sub('', text)]


def split_table_only(content, table_info, spliter, split_size):
    """
    Split content that may embed <table:...> placeholder tags (no images).

    :param content: text to chunk, containing the body plus table tags
    :param table_info: dict of table payloads keyed by table identifier
    :param spliter: text splitter object exposing ``split_text``
    :param split_size: maximum size of a single chunk
    :return: split_ls — the combined list of table and text chunks
    """
    split_ls = []
    content_blocks = re.split(r'(<table:[^>]+>)', content)
    for block in content_blocks:
        block = block.strip()
        if not block:
            continue  # skip empty segments
        # Extract the table identifier if this segment is a table tag.
        table_key = re.findall(r'<table:([^>]+)>', block)
        # Table placeholder segment.
        if table_key:
            table_dic = table_info.get(table_key[0])
            if table_dic:
                sub_tables, exceed_cell_ls = get_md_tables(table_dic, split_size)
                split_ls.extend(sub_tables)
                # Cells too long for a table chunk are re-split as plain text.
                if exceed_cell_ls:
                    exceed_text_ls = spliter.split_text('\n'.join(exceed_cell_ls))
                    split_ls.extend(exceed_text_ls)
        # Plain text segment.
        else:
            text_ls = filter_texts_and_images(spliter.split_text(block))
            split_ls.extend(text_ls)
    return split_ls


def append_records(db_data, content_list, image_list, pos_list, uuid, to_expr, register_dict):
    """
    Append chunk texts, derived features, image paths, block positions and
    per-file metadata to the parallel lists in ``db_data``.
    """
    file_name = register_dict[uuid]['name']
    count = len(content_list)
    db_data['block'] += content_list
    # NOTE(review): when to_expr is True, get_custom_features returns a str,
    # so this extends addedValue character-by-character — confirm intended.
    db_data['addedValue'] += get_custom_features(content_list, to_expr=to_expr)
    db_data['imagePath'] += image_list
    db_data['fileName'] += [file_name] * count
    # db_data['fileType'].extend([register_dict[uuid]['fileType']] * n)
    # db_data['srcID'].extend([register_dict[uuid]['srcID']] * n)
    # db_data['fileDesc'].extend([fn + CUSTOM_SEP + fd] * n)
    db_data['blockPosition'] += pos_list


def update_index(name_index, db_data, name, file_suffix, prev_index):
    """
    Record the [start, end) span of newly appended blocks in ``name_index``
    (only when something was added) and return the new end position.
    """
    current = len(db_data['block'])
    if current > prev_index:
        name_index.append([name, file_suffix, prev_index, current])
    return current


def split_xlsx_xls(failed_ls, converted_dir, rc_splitter, db_data, name_index, split_size, register_dict, with_position,
                   to_expr):
    """
    Parse and chunk xlsx/xls workbooks into ``db_data``.
    Note: xls files must already be converted to xlsx format (they are read
    from ``converted_dir/xls2xlsx_files``).

    :param failed_ls: accumulator for {'filePath', 'reason'} failure records
    :param converted_dir: directory holding the (converted) workbook files
    :param rc_splitter: text splitter used for over-long cell contents
    :param db_data: dict of parallel lists the chunks are appended to
    :param name_index: per-file [name, suffix, start, end] span records
    :param split_size: maximum chunk size passed to prepare_excel_table
    :param register_dict: uuid -> file metadata registry
    :param with_position: when True, emit JSON cell-position metadata
    :param to_expr: forwarded to append_records / get_custom_features
    """

    def parse_xlsx_content(files, file_suffix):
        # Runs over (file_path, uuid) pairs; failures are recorded, not raised.
        temp_index = len(db_data['imagePath'])
        bar = tqdm(files, total=len(files))
        for fp, uuid in bar:
            try:
                bar.set_description(f'为 {uuid[0:8]}划分段落')
                # Read every worksheet of the workbook.
                sheets = pd.read_excel(fp, sheet_name=None).items()
                for sheet_name, df in sheets:
                    # Split the sheet into table chunks plus leftover cells.
                    table_ls, extra_data_ls, table_pos_ls = prepare_excel_table(df, max_len=split_size)
                    n_table = len(table_ls)
                    if with_position:
                        # Excel rows/columns are 1-based and the header row
                        # occupies one row, hence the +2 / +1 offsets.
                        table_pos_ls = [
                            json.dumps({
                                "table": {
                                    'sheet': sheet_name, 'startPos': [r_start + 2, col_start + 1],
                                    'endPos': [r_end + 2, col_end + 1]
                                }
                            }, ensure_ascii=False)
                            for ((r_start, col_start), (r_end, col_end)) in table_pos_ls
                        ]
                    else:
                        table_pos_ls = ["-1"] * n_table
                    # Store the table chunks (no associated images).
                    append_records(
                        db_data,
                        table_ls,
                        [''] * n_table,
                        table_pos_ls,
                        uuid,
                        to_expr,
                        register_dict
                    )
                    # Extra (non-table) cell data: split the text first.
                    for extra_info in extra_data_ls:
                        # Each element of extra_data_ls is ((row, col), content).
                        (r_c, content) = extra_info
                        split_texts = rc_splitter.split_text(content)
                        n_extra = len(split_texts)

                        if with_position:
                            extra_pos = json.dumps({'sheet': sheet_name, 'startPos': [r_c[0] + 2, r_c[1] + 1],
                                                    'endPos': [r_c[0] + 2, r_c[1] + 1]}, ensure_ascii=False)
                        else:
                            extra_pos = "-1"

                        append_records(
                            db_data,
                            split_texts,
                            [''] * n_extra,
                            [extra_pos] * n_extra,
                            uuid,
                            to_expr,
                            register_dict
                        )
                temp_index = update_index(name_index, db_data, register_dict[uuid]['name'], file_suffix, temp_index)
            except Exception as e:
                failed_ls.append({'filePath': fp, 'reason': str(e)})

    # Collect native xlsx files and xls files already converted to xlsx.
    xlsx_files = traverse_dir(f'{converted_dir}/xlsx_files', ['xlsx'])
    xls_files = traverse_dir(f'{converted_dir}/xls2xlsx_files', ['xlsx'])
    # Parse and persist the contents.
    if xlsx_files:
        parse_xlsx_content(xlsx_files, file_suffix='xlsx')
    if xls_files:
        parse_xlsx_content(xls_files, file_suffix='xls')


def get_split_documents(content, separator):
    """
    Group sub-contents by (doc_id, page) and rebuild each page's text,
    keeping the (sub_content, pos) pairs in the document metadata so the
    original offsets can be recovered later.

    :param content: iterable of [doc_id, page, pos, sub_content] rows
    :param separator: string used to re-join the page's sub-contents
    :return: list of Document, one per (doc_id, page) group
    """
    groups = defaultdict(list)
    for doc_id, page, pos, sub_content in content:
        # Historical quirk: position is stored as (sub_content, pos).
        groups[(doc_id, page)].append({
            "position": (sub_content, pos),
            "sub_content": sub_content
        })

    spliter_docs = []
    for (doc_id, page), group_items in groups.items():
        page_text = separator.join(entry["sub_content"] for entry in group_items)
        spliter_docs.append(Document(
            page_content=page_text,
            metadata={
                'doc_id': doc_id,
                'page': page,
                'position': [entry["position"] for entry in group_items]
            }
        ))
    return spliter_docs


def split_ofd_content(failed_ls, spliter, parse_data, db_data, name_index, split_size, register_dict, with_position, to_expr):
    """
    Parse and chunk OFD content (text and tables), appending the resulting
    blocks to ``db_data``; per-file failures are collected into ``failed_ls``.

    Reads ``parse_data/ofd_content.json``. Invoice files carry a flat
    content string; other files carry positioned rows that are regrouped
    per page via get_split_documents before splitting.
    """
    json_path = os.path.join(parse_data, 'ofd_content.json')
    if not os.path.exists(json_path):
        return
    separator = CUSTOM_SEP
    temp_index = len(db_data['imagePath'])
    with open(json_path, 'r', encoding='utf-8') as file:
        data_dict = json.load(file)
        file_suffix = data_dict['file_suffix']
        content_ls = tqdm(data_dict['file_list'])
        for content_dict in content_ls:
            fp = content_dict['file_path']
            try:
                uuid = content_dict['file_uuid']
                content_ls.set_description(f'为 {uuid[0:8]}划分段落')
                image_path = content_dict['image_path']
                table_info = content_dict['table_info']
                is_invoice = content_dict['is_invoice']
                content = content_dict['content']
                if is_invoice:
                    # Invoices: split the flat string directly (table-aware
                    # when table info is available).
                    # BUG FIX: this branch previously filled ``sub_page_ls``
                    # while the code below consumed ``sub_content_ls``,
                    # raising NameError (or reusing a stale value) for
                    # invoice files.
                    if table_info:
                        sub_content_ls = split_table_only(content, table_info, spliter, split_size)
                    else:
                        sub_content_ls = filter_texts_and_images(spliter.split_text(content))
                else:
                    # Non-invoice: rebuild pages from positioned rows, split
                    # them as documents, then re-split the joined text.
                    split_documents = get_split_documents(content, separator)
                    sub_page_ls = []
                    for doc in spliter.split_documents(split_documents):
                        page_content_ls = doc.page_content.split(separator)
                        sub_page_ls.append(' '.join(page_content_ls))
                    content = ''.join(sub_page_ls)
                    sub_content_ls = spliter.split_text(content)
                sub_position_ls = ["-1"] * len(sub_content_ls)
                img_str_ls = [image_path] * len(sub_content_ls)
                append_records(
                    db_data,
                    sub_content_ls,
                    img_str_ls,
                    sub_position_ls,
                    uuid,
                    to_expr,
                    register_dict
                )
                temp_index = update_index(name_index, db_data, register_dict[uuid]['name'], file_suffix, temp_index)
            except Exception as e:
                failed_ls.append({'filePath': fp, 'reason': str(e)})


def process_image_content(content, image_info, spliter):
    """
    Extract OCR text for every <image:...> tag in ``content``, split it into
    chunks and pair each chunk with its image path and the OCR line
    positions that contributed to it.

    :return: (split_ls, split_pos_ls, img_path_ls)
    """
    split_ls, split_pos_ls, img_path_ls = [], [], []
    # Split on <table:...> / <image:...> tags, discarding blank segments.
    segments = [seg.strip() for seg in re.split(r'(<[tableimg]+:[^>]+>)', content) if seg.strip()]
    for segment in segments:
        # Only image segments are processed here.
        image_match = re.search(r'<image:([^>]+)>', segment)
        if not image_match:
            continue
        # Look up the image payload; skip unknown keys.
        image_data = image_info.get(image_match.group(1))
        if not image_data:
            continue
        # OCR lines have the structure (pos, (text, score)).
        ocr_lines = image_data.get('content', [])
        ocr_text = re_filter("\n".join(line[1][0] for line in ocr_lines)).strip()
        chunk_ls = spliter.split_text(ocr_text)
        split_ls.extend(chunk_ls)
        # Same image path for every chunk derived from this image.
        img_path_ls.extend([image_data.get('path')] * len(chunk_ls))
        # For each chunk, record the OCR lines whose text it contains.
        for chunk in chunk_ls:
            matched = [
                {"text": line[1][0], "pos": line[0]}
                for line in ocr_lines
                if line[1][0] in chunk
            ]
            split_pos_ls.append(json.dumps(matched, ensure_ascii=False) if matched else "-1")
    return split_ls, split_pos_ls, img_path_ls


def split_img_content(failed_ls, parse_data, spliter, db_data, name_index, register_dict, with_position, to_expr):
    """
    Chunk OCR'd image files and append the results to ``db_data``; per-file
    failures are collected into ``failed_ls``.

    Only JSON files named ``<fmt>_content`` for fmt in
    SUPPORTED_IMAGE_FORMATS are processed.
    """
    temp_index = len(db_data['imagePath'])
    # Keep only the per-format content files, e.g. 'png_content.json'.
    iter_file = {
        item for item in traverse_dir(parse_data, ['json'])
        if item[-1] in {f'{i}_content' for i in SUPPORTED_IMAGE_FORMATS}
    }
    for json_path, _ in iter_file:
        with open(json_path, 'r', encoding='utf-8') as file:
            data_dict = json.load(file)
            file_suffix = data_dict['file_suffix']
            bar = tqdm(data_dict['file_list'])

            for content_dict in bar:
                fp = content_dict['file_path']
                try:
                    uuid = content_dict['file_uuid']
                    bar.set_description(f'为 {uuid[0:8]}划分段落')
                    content = content_dict['content']
                    image_info = content_dict['image_info']
                    # OCR text chunks, their positions and source image paths.
                    block_ls, block_pos_ls, img_path_ls = process_image_content(content, image_info, spliter)
                    if not with_position:
                        block_pos_ls = ['-1'] * len(block_ls)
                    append_records(db_data, block_ls, img_path_ls, block_pos_ls, uuid, to_expr, register_dict)
                    temp_index = update_index(name_index, db_data, register_dict[uuid]['name'], file_suffix, temp_index)
                except Exception as e:
                    failed_ls.append({'filePath': fp, 'reason': str(e)})


def split_xml_content(failed_ls, parse_data, ofd_xlm_spliter, db_data, name_index, split_size, register_dict, to_expr):
    """
    Chunk parsed XML content (text, tables and images) and append the
    results to ``db_data``; per-file failures go into ``failed_ls``.

    Reads ``parse_data/xml_content.json``; positions are not computed for
    XML, so every block position is "-1".
    """
    json_path = os.path.join(parse_data, 'xml_content.json')
    if not os.path.exists(json_path):
        return
    temp_index = len(db_data['imagePath'])
    with open(json_path, 'r', encoding='utf-8') as file:
        data_dict = json.load(file)
        file_suffix = data_dict['file_suffix']
        content_ls = tqdm(data_dict['file_list'])
        # Splitter dedicated to ofd/xml content.
        spliter = ofd_xlm_spliter
        for content_dict in content_ls:
            uuid = content_dict['file_uuid']
            fp = content_dict['file_path']
            try:
                content_ls.set_description(f'为 {uuid[0:8]}划分段落')
                content = content_dict['content']
                image_info = content_dict['image_info']
                table_info = content_dict['table_info']
                # Media-aware splitting when tables/images are embedded.
                if table_info or image_info:
                    sub_content_ls, img_str_ls = split_content_with_media(
                        content, table_info, image_info, spliter, split_size
                    )
                else:
                    sub_content_ls = filter_texts_and_images(spliter.split_text(content))
                    img_str_ls = [''] * len(sub_content_ls)
                n_sub = len(sub_content_ls)
                append_records(
                    db_data,
                    sub_content_ls,
                    img_str_ls,
                    ["-1"] * n_sub,
                    uuid,
                    to_expr,
                    register_dict
                )
                temp_index = update_index(name_index, db_data, register_dict[uuid]['name'], file_suffix, temp_index)
            except Exception as e:
                failed_ls.append({'filePath': fp, 'reason': str(e)})


def binary_to_numpy(binary_data):
    """Decode raw image bytes into a color numpy array via OpenCV."""
    buffer = np.frombuffer(binary_data, np.uint8)
    return cv2.imdecode(buffer, cv2.IMREAD_COLOR)


def extract_text_from_images(image_list, pdf, ocr):
    """
    OCR every image of a PDF page and return the filtered texts joined by
    newlines; known junk OCR results (e.g. 'CT)') are discarded.
    """
    exclude_content_set = {'CT)'}
    texts = []
    for img in image_list:
        # img[0] is the xref used to pull the raw image bytes from the PDF.
        numpy_image = binary_to_numpy(pdf.extract_image(img[0])["image"])
        ocr_result = ocr(numpy_image, use_cls=True)
        lines = [[box.tolist(), (txt, score)]
                 for txt, box, score in zip(ocr_result.txts, ocr_result.boxes, ocr_result.scores)]
        if not lines:
            continue
        extracted = re_filter(' '.join(line[1][0] for line in lines)).strip()
        if extracted and extracted not in exclude_content_set:
            texts.append(extracted)
    return '\n'.join(texts)


def process_pdf_files(pdf_path, ocr, exclude_words):
    """
    Build a {page_number: word_set} index for a PDF: the page's own text
    plus OCR'd image text, with ``exclude_words`` filtered from the image
    words. Page numbers are 1-based.
    """
    pdf = fitz.open(pdf_path)
    pdf_dict = {}
    for page in pdf.pages():
        page_text = re_filter(page.get_text())
        text_set = {word.strip() for word in jieba.cut(page_text) if word.strip()}
        page_images = page.get_images()
        if not page_images:
            pdf_dict[page.number + 1] = text_set
            continue
        # OCR the page's images and merge their words into the index.
        image_text = extract_text_from_images(page_images, pdf, ocr)
        img_text_set = set()
        if image_text:
            img_text_set = {
                word.strip() for word in jieba.cut(image_text)
                if word.strip() and word not in exclude_words
            }
        pdf_dict[page.number + 1] = text_set | img_text_set
    return pdf_dict


def is_markdown_table(text):
    """
    Return True when ``text`` looks like a markdown table: a pipe-delimited
    header row followed by a separator row containing only '-', '|', ':'
    and whitespace.
    """
    lines = text.strip().splitlines()
    if len(lines) < 2:
        return False
    header, separator = lines[0], lines[1]
    # bool(...) so callers always receive a boolean; the previous version
    # returned the re.Match object itself on success.
    return bool(
        header.startswith('|') and header.endswith('|') and
        separator.startswith('|') and separator.endswith('|') and
        re.match(r'^\|[-|:\s]+\|$', separator)
    )


def extract_header_set(text):
    """Return the set of column names from the first row of a markdown table."""
    first_line = text.strip().splitlines()[0]
    return {cell.strip() for cell in first_line.strip('|').split('|')}


def get_pdf_pos(exclude_words, block_ls, pdf_dict, sim_threshold=0.85):
    """
    Locate each text block on the PDF pages by bag-of-words similarity.

    :param exclude_words: stop words to ignore; markdown table headers seen
        while iterating are added to the working copy
    :param block_ls: list of text blocks to locate
    :param pdf_dict: {page_number: word_set} index from process_pdf_files
    :param sim_threshold: pages scoring above this are all reported
    :return: list of JSON strings like {'page': [...]}; "-1" for a block
        that matches no page at all
    """
    stop_words = exclude_words.copy()
    position_ls = []
    for block in block_ls:
        if is_markdown_table(block):
            # Table headers repeat across pages; treat them as stop words.
            stop_words |= extract_header_set(block)

        # Tokenize the block, dropping stop words.
        query_set = {
            word.strip() for word in jieba.cut(block)
            if word.strip() and word not in stop_words
        }
        # Similarity of the block against every page.
        candidate_pos = {
            page: score
            for page, content_set in pdf_dict.items()
            if (score := bow_similarity(query_set, content_set)) > 0.1
        }
        if not candidate_pos:
            # BUG FIX: max() on an empty dict raised ValueError and failed
            # the whole file; report "no position" instead.
            position_ls.append("-1")
            continue
        # All pages above the threshold, else the single best-scoring page.
        pages = [page for page, score in candidate_pos.items() if score > sim_threshold]
        position = {
            'page': pages if pages else [max(candidate_pos, key=candidate_pos.get)]
        }
        position_ls.append(json.dumps(position, ensure_ascii=False))
    return position_ls


def split_docx_doc_pdf_content(failed_ls, parse_out_dir, converted_dir, spliter, db_data, name_index, split_size,
                               register_dict,
                               ocr, with_position, to_expr):
    """
    Chunk parsed docx/doc/pdf content and append the results to ``db_data``
    (doc and docx must already be converted to PDF for position lookup).

    When ``with_position`` is set, each chunk is located on its PDF pages by
    bag-of-words similarity (see process_pdf_files / get_pdf_pos); otherwise
    block positions are "-1". Per-file failures go into ``failed_ls``.
    """
    temp_index = len(db_data['imagePath'])
    # Words ignored when matching chunks against page word sets.
    exclude_words = {'-', '|', '\n'}
    # Maps each content JSON name to the directory of its converted PDFs.
    file_map = {
        'docx_content': os.path.join(converted_dir, 'docx2pdf_files'),
        'doc_content': os.path.join(converted_dir, 'doc2pdf_files'),
        'pdf_content': os.path.join(converted_dir, 'pdf_files'),
    }
    # Keep only the content files this function is responsible for.
    iter_file = {item for item in traverse_dir(parse_out_dir, ['json']) if item[-1] in file_map.keys()}
    for json_path, json_name in iter_file:
        with open(json_path, 'r', encoding='utf-8') as file:
            data_dict = json.load(file)
            file_suffix = data_dict['file_suffix']
            content_ls = tqdm(data_dict['file_list'])
            for content_dict in content_ls:
                fp = content_dict['file_path']
                try:
                    uuid = content_dict['file_uuid']
                    content_ls.set_description(f'为 {uuid[0:8]}划分段落')
                    content = content_dict['content']
                    image_info = content_dict['image_info']
                    table_info = content_dict['table_info']
                    # Media-aware splitting when tables/images are embedded.
                    if table_info or image_info:
                        sub_content_ls, img_str_ls = split_content_with_media(
                            content, table_info, image_info, spliter, split_size
                        )
                    else:
                        sub_content_ls = filter_texts_and_images(spliter.split_text(content))
                        img_str_ls = [''] * len(sub_content_ls)
                    if with_position:
                        # Locate each chunk on the converted PDF's pages.
                        pdf_path = os.path.join(file_map[json_name], f'{uuid}.pdf')
                        pdf_dict = process_pdf_files(pdf_path, ocr, exclude_words)
                        block_pos_ls = get_pdf_pos(exclude_words, sub_content_ls, pdf_dict)
                    else:
                        block_pos_ls = ["-1"] * len(img_str_ls)
                    append_records(
                        db_data,
                        sub_content_ls,
                        img_str_ls,
                        block_pos_ls,
                        uuid,
                        to_expr,
                        register_dict
                    )
                    temp_index = update_index(name_index, db_data, register_dict[uuid]['name'], file_suffix, temp_index)
                except Exception as e:
                    failed_ls.append({'filePath': fp, 'reason': str(e)})


def bow_similarity(query_set, content_set):
    """Return the fraction of `query_set` that is covered by `content_set`.

    Computes |query_set ∩ content_set| / |query_set|. An empty query set
    yields 0 so callers never divide by zero.
    """
    if not query_set:
        return 0
    overlap = query_set.intersection(content_set)
    return len(overlap) / len(query_set)


def merge_position(sets_list):
    """Merge an ordered list of (possibly overlapping) line-number sets.

    Consecutive sets sharing at least one element are unioned together;
    each resulting group is reported as a [min, max] range.
    """
    if not sets_list:
        return []

    groups = [sets_list[0]]
    for cur in sets_list[1:]:
        last = groups[-1]
        if last.isdisjoint(cur):
            # No overlap with the previous group: start a new one.
            groups.append(cur)
        else:
            # Overlaps: fold into the previous group (in-place union,
            # matching the original's |= behavior).
            last |= cur
    return [[min(group), max(group)] for group in groups]


def map_chunks_to_lines(content, sub_content_ls, threshold=1.0):
    """Map each chunk in `sub_content_ls` onto line numbers of `content`.

    Parameters:
    - content: the source text, split on line boundaries via splitlines();
    - sub_content_ls: list of text chunks, each possibly multi-line;
    - threshold: similarity threshold, default 1.0 (exact coverage required).

    Algorithm:
    1. Tokenize every non-blank line of `content` with jieba, producing
       (line_no, token_set) pairs.
    2. For each chunk, union the token sets of its lines into a query set.
    3. Slide a window of the chunk's line count over the content lines and
       compare the window's combined token set against the query set using
       the containment similarity (bow_similarity).
    4. Windows reaching the threshold are collected and merged into
       [min, max] line ranges; a chunk with no match yields "-1".
    """
    # Tokenized source lines: [(1-based line number, token set), ...]
    indexed_lines = [
        (idx + 1, {tok for tok in jieba.cut(text) if tok.strip()})
        for idx, text in enumerate(content.splitlines())
        if text.strip()
    ]
    n_lines = len(indexed_lines)
    positions = []

    for chunk in sub_content_ls:
        # Tokenize each non-blank line of the chunk.
        per_line_tokens = [
            {tok for tok in jieba.cut(text) if tok.strip()}
            for text in chunk.splitlines() if text.strip()
        ]
        win = len(per_line_tokens)
        # An empty chunk cannot be located.
        if win == 0:
            positions.append("-1")
            continue

        # The chunk's full vocabulary is the query.
        query_set = set().union(*per_line_tokens)
        matches = []

        # Slide a window of `win` consecutive content lines.
        for start in range(n_lines - win + 1):
            window = indexed_lines[start:start + win]
            window_line_numbers = {no for no, _ in window}
            window_tokens = set().union(*(toks for _, toks in window))
            # Accept the window when it covers the query vocabulary.
            if bow_similarity(query_set, window_tokens) >= threshold:
                matches.append(window_line_numbers)
        # Collapse adjacent/overlapping windows into ranges.
        merged = merge_position(matches)
        positions.append(merged or "-1")

    return positions


def split_txt(failed_ls, parse_data, rc_splitter, db_data, name_index, register_dict, with_position, to_expr):
    """Parse and chunk txt-file content, appending the results to db_data.

    Reads `txt_content.json` from `parse_data` (no-op if absent), splits
    each file's content with `rc_splitter`, optionally maps chunks back to
    source line numbers, and records failures into `failed_ls`.
    """
    temp_index = len(db_data['imagePath'])
    json_path = os.path.join(parse_data, 'txt_content.json')
    if not os.path.exists(json_path):
        return
    with open(json_path, 'r', encoding='utf-8') as fh:
        parsed = json.load(fh)
    file_suffix = parsed['file_suffix']
    progress = tqdm(parsed['file_list'])
    for entry in progress:
        fp = entry['file_path']
        try:
            uuid = entry['file_uuid']
            progress.set_description(f'为 {uuid[0:8]}划分段落')
            content = entry['content']
            chunks = filter_texts_and_images(rc_splitter.split_text(content))
            n_chunks = len(chunks)
            if with_position:
                # Store each chunk's matched line ranges as a JSON payload.
                position_ls = [
                    json.dumps({"line": pos}, ensure_ascii=False)
                    for pos in map_chunks_to_lines(content, chunks)
                ]
            else:
                position_ls = ["-1"] * n_chunks
            append_records(
                db_data,
                chunks,
                [''] * n_chunks,
                position_ls,
                uuid,
                to_expr,
                register_dict
            )
            temp_index = update_index(name_index, db_data, register_dict[uuid]['name'], file_suffix, temp_index)
        except Exception as e:
            # Record the failure and keep processing the remaining files.
            failed_ls.append({'filePath': fp, 'reason': str(e)})


def split_name_index(batch_end, name_index):
    """Split off the head of `name_index` belonging to the current batch.

    About `name_index`: it carries block-index information together with
    file-name attributes. A file is split into multiple blocks, so several
    blocks may share the same file name; in that case the name only needs
    to be vectorized once, and `name_index` records which blocks reuse
    which name to avoid repeated embedding work. When the data to be
    vectorized is stored in batches (to reduce memory pressure), this
    function partitions `name_index` accordingly.

    Side effect: the consumed entries are deleted from `name_index` in
    place; an entry straddling `batch_end` is split, with its remainder
    re-inserted at the front for the next batch.

    NOTE(review): if every entry ends within `batch_end` (loop finishes
    without break), `name_index` is left untouched — confirm the caller
    expects this on the final batch.

    :param batch_end: index (exclusive) where the current batch ends
    :param name_index: list of [name, suffix, start, end] records, ordered by range
    :return: the records (clipped at batch_end) belonging to this batch
    """
    cur_file_uuid_index = []
    for f, f_suffix, i, j in name_index:
        if j > batch_end:
            # This record crosses the batch boundary: keep the part up to
            # batch_end for the current batch...
            cur_file_uuid_index.append([f, f_suffix, i, batch_end])
            # ...remove everything consumed so far (including this record)...
            del name_index[:len(cur_file_uuid_index)]
            # ...and push the remainder back for the next batch. Mutating
            # during iteration is safe here because we break immediately.
            name_index.insert(0, [f, f_suffix, batch_end, j])
            break
        else:
            cur_file_uuid_index.append([f, f_suffix, i, j])
    return cur_file_uuid_index


def split_video_content(register_dict, rc_splitter, db_data, name_index, to_expr):
    """Chunk the textual description of registered video files into db_data."""
    temp_index = len(db_data['imagePath'])
    for uuid, meta in register_dict.items():
        # fileSuffix "1" marks a video entry; its description is the text to chunk.
        if meta['fileSuffix'] != '1':
            continue
        file_suffix = meta['fileName'].split('.')[-1]
        chunks = filter_texts_and_images(rc_splitter.split_text(meta['fileDesc']))
        count = len(chunks)
        append_records(
            db_data,
            chunks,
            [''] * count,
            ["-1"] * count,
            uuid,
            to_expr,
            register_dict
        )
        temp_index = update_index(name_index, db_data, register_dict[uuid]['name'], file_suffix, temp_index)


def split_file(failed_ls, parse_out_dir, converted_dir, seg_out_dir, split_size, chunk_overlap, register_dict, ocr,
               with_position=False, to_expr=False):
    """Split every parsed file's content into chunks and persist the results.

    Runs one chunker per source type (image / ofd / xml / doc-docx-pdf /
    xls-xlsx / txt), accumulating chunks into `unsct_data` and name indices
    into `unsct_index`, then writes segments, index, failures and the file
    register to `seg_out_dir` as JSON.

    :param failed_ls: accumulator of {'filePath', 'reason'} failure records.
    :param parse_out_dir: directory with the per-type parsed-content JSON files.
    :param converted_dir: directory holding converted source files (pdf/xlsx).
    :param seg_out_dir: output directory for the segmentation artifacts.
    :param split_size: target chunk size for the splitters.
    :param chunk_overlap: overlap between consecutive chunks.
    :param register_dict: per-uuid file registry ('name', 'path', ...).
    :param ocr: OCR engine forwarded to the pdf position extraction.
    :param with_position: also compute chunk positions when True.
    :param to_expr: forwarded flag controlling record expansion.
    """
    os.makedirs(seg_out_dir, exist_ok=True)
    # Raw strings for the regex separators: "\s" / "\!" inside plain string
    # literals are invalid escape sequences (SyntaxWarning since Python 3.12,
    # slated to become an error); the resulting values are unchanged.
    rc_splitter = ChineseRecursiveTextSplitter(
        separators=[r"。|！|？", r"；|;\s", "\n\n", "\n", r"，|,\s", r"\!\s|\?\s"],
        keep_separator=True,
        is_separator_regex=True,
        chunk_size=split_size,
        chunk_overlap=chunk_overlap
    )
    xlm_spliter = ChineseRecursiveTextSplitter(separators=['\n\n\n\n', '\n\n\n', '\n\n', '\n', "。|！|？"],
                                               chunk_size=split_size, chunk_overlap=chunk_overlap)
    unsct_data = {
        'block': [], 'blockPosition': [], 'imagePath': [], 'fileName': [], 'addedValue': []}
    unsct_index = []  # one file may yield many chunks; repeated names are indexed so each is embedded only once

    # Chunk image content
    split_img_content(failed_ls, parse_out_dir, rc_splitter, unsct_data, unsct_index, register_dict, with_position,
                      to_expr)
    # Chunk ofd content
    split_ofd_content(failed_ls, rc_splitter, parse_out_dir, unsct_data, unsct_index, split_size, register_dict,
                      with_position, to_expr)
    # Chunk xml content
    split_xml_content(failed_ls, parse_out_dir, xlm_spliter, unsct_data, unsct_index, split_size, register_dict,
                      to_expr)
    # Chunk doc / docx / pdf content
    split_docx_doc_pdf_content(failed_ls, parse_out_dir, converted_dir, rc_splitter, unsct_data, unsct_index,
                               split_size, register_dict, ocr, with_position, to_expr)
    # Chunk xls / xlsx content
    split_xlsx_xls(failed_ls, converted_dir, rc_splitter, unsct_data, unsct_index, split_size, register_dict,
                   with_position, to_expr)
    # Chunk txt content
    split_txt(failed_ls, parse_out_dir, rc_splitter, unsct_data, unsct_index, register_dict, with_position, to_expr)

    segments_path = os.path.join(seg_out_dir, 'unstc_data.json')
    with open(segments_path, 'w+', encoding='utf-8') as f:
        json.dump(unsct_data, f, ensure_ascii=False, indent=4)
    name_index_path = os.path.join(seg_out_dir, 'unstc_index.json')
    with open(name_index_path, 'w+', encoding='utf-8') as f:
        json.dump(unsct_index, f, ensure_ascii=False, indent=4)
    # Replace the uuid-based path/message in each failure record with the
    # original file name registered for that uuid.
    for item in failed_ls:
        base_name = os.path.basename(item['filePath'])
        uuid_dict = register_dict[base_name.split('.')[0]]
        item['filePath'] = uuid_dict['path']
        item['reason'] = item['reason'].replace(base_name, uuid_dict['name'])
    with open(os.path.join(seg_out_dir, 'unstc_failed.json'), 'w+', encoding='utf-8') as f:
        json.dump(failed_ls, f, ensure_ascii=False, indent=4)
    with open(os.path.join(seg_out_dir, 'register.json'), 'w+', encoding='utf-8') as f:
        json.dump(register_dict, f, ensure_ascii=False, indent=4)


# Traverse a list or dict and split it into size-bounded JSON chunks.
def split_json(data, max_length):
    """Split a list or dict into serialized JSON chunks of bounded size.

    Items (list elements or dict key/value pairs) are greedily packed into
    chunks whose serialized length stays within `max_length`; a single item
    longer than `max_length` still becomes its own (oversized) chunk. Each
    chunk is valid JSON — a JSON array for list input, a JSON object for
    dict input (the old dict branch joined objects with commas, producing
    invalid JSON for multi-item chunks). Any other input type yields [].

    :param data: list or dict to split.
    :param max_length: soft upper bound on each chunk's character length.
    :return: list of JSON strings.
    """
    chunks = []

    def flush(items, as_dict):
        # Serialize one accumulated batch; skip empties so no '[]'
        # placeholder chunk is emitted (the old code leaked a literal '[]'
        # when the very first list item alone exceeded max_length).
        if not items:
            return
        if as_dict:
            # Merge the single-pair dicts back into one valid JSON object.
            merged = {}
            for pair in items:
                merged.update(pair)
            chunks.append(json.dumps(merged, ensure_ascii=False))
        else:
            chunks.append(f'[{",".join(json.dumps(i, ensure_ascii=False) for i in items)}]')

    if isinstance(data, (list, dict)):
        as_dict = isinstance(data, dict)
        # Normalize dict input to a stream of single-pair dicts so both
        # branches share the same packing loop.
        items = ({k: v} for k, v in data.items()) if as_dict else data
        current = []
        current_length = 0
        for item in items:
            item_length = len(json.dumps(item, ensure_ascii=False))
            # Flush the running chunk before it would overflow.
            if current and current_length + item_length > max_length:
                flush(current, as_dict)
                current = [item]
                current_length = item_length
            else:
                current.append(item)
                current_length += item_length
        flush(current, as_dict)

    return chunks


def get_batche_index(ls_len, batch_size):
    """Build (start, end) index pairs for batched iteration, e.g.:
    ls_len=99500
    batch_size=1000
    output:
    [(0, 1000), (1000, 2000), ..., (99000, 99500)]

    :param ls_len: total number of items
    :param batch_size: maximum items per batch
    :return: list of (start, end) tuples; empty when ls_len == 0
    """
    # min() clamps the final batch to ls_len; this also fixes the
    # IndexError the old `end[-1] = ls_len` raised when ls_len == 0.
    return [(start, min(start + batch_size, ls_len)) for start in range(0, ls_len, batch_size)]


def insert_vectors(db_data, db_index, register_dict, embedder, milvus_op, vec_batch_size):
    """Embed every text block in db_data and insert them into Milvus in batches.

    :param db_data: dict of parallel per-chunk lists ('block', 'addedValue',
        'blockPosition', 'imagePath', 'fileUUID', 'fileDesc', 'fileType',
        'srcID'), one entry per chunk.
    :param db_index: iterable of records whose first element is a file uuid
        and whose last two elements are that file's [start, end) chunk range.
    :param register_dict: per-uuid file registry; each file's vector-id span
        is recorded here under 'vecID' as a "start, end" string.
    :param embedder: model exposing encode(texts, show_progress_bar=...)
        returning one dense embedding per text.
    :param milvus_op: wrapper exposing the Milvus `collection` to insert into.
    :param vec_batch_size: number of chunks embedded/inserted per batch.
    """
    # Pull the chunk texts and their added-value strings out of db_data
    text_ls = db_data['block']
    added_ls = db_data['addedValue']

    # Compute (start, end) batch windows over the whole chunk list
    vec_batches = get_batche_index(len(text_ls), vec_batch_size)

    # Progress bar over the batches
    bar = tqdm(vec_batches, desc='Inserting unstc vectors')
    vec_ids = []
    for item in db_index:
        uuid = item[0]
        # One "<uuid>-<i>" id per chunk in this file's index range
        for i in range(item[-2], item[-1]):
            sign_id = f'{uuid}-{str(i)}'
            vec_ids.append(sign_id)
        # Remember which vector-id span belongs to this file
        register_dict[uuid]['vecID'] = f'{item[-2]}, {item[-1]}'

    # Gather the remaining per-chunk fields once, up front
    all_fields = {
        "vecID": vec_ids,
        "blockPosition": db_data['blockPosition'],
        "imagePath": db_data['imagePath'],
        "fileUUID": db_data['fileUUID'],
        "fileDesc": db_data['fileDesc'],
        "fileType": db_data["fileType"],
        "srcID": db_data['srcID'],
    }

    # Process each storage batch
    for batch_start, batch_end in bar:
        # Slice out this batch's ids, texts and added values
        cur_vec_id = vec_ids[batch_start:batch_end]
        cur_block_ls = text_ls[batch_start:batch_end]
        cur_added_value = added_ls[batch_start:batch_end]

        # Slice every remaining field for the same window
        cur_data = {key: value[batch_start:batch_end] for key, value in all_fields.items()}
        bar.set_description(f'making vectors {batch_start}-{batch_end}')
        # Embed this batch's texts
        block_embeddings = embedder.encode(cur_block_ls, show_progress_bar=False)
        bar.set_description(f'inserting vectors {batch_start}-{batch_end}')
        # Assemble this batch's row dicts for insertion
        cur_db_data = [
            {
                'vecID': cur_vec_id[i],
                'block': cur_block_ls[i],
                'blockDenseEmbeddings': block_embeddings[i],
                'addedValue': cur_added_value[i],
                'blockPosition': cur_data['blockPosition'][i],
                'imagePath': cur_data['imagePath'][i],
                'fileUUID': cur_data['fileUUID'][i],
                'fileDesc': cur_data['fileDesc'][i],
                'fileType': cur_data['fileType'][i],
                # NOTE(review): srcID is sliced into cur_data but deliberately(?)
                # excluded from the inserted rows — confirm against the
                # collection schema before re-enabling.
                # 'srcID': cur_data['srcID'][i],
            }
            for i in range(len(cur_block_ls))  # iterate this batch's rows
        ]

        # Insert the batch into Milvus
        milvus_op.collection.insert(cur_db_data)
