import os
import requests
import numpy as np
from typing import List, Optional, Any
import logging
import os.path
import re
import json
import shutil
import pandas as pd
from langchain_text_splitters import RecursiveCharacterTextSplitter
from tqdm import tqdm
from preprocess.table_utils import get_md_tables
import pickle
from preprocess.file_paser.xlsx_parser.xlsx_parse import prepare_excel_table

logger = logging.getLogger(__name__)


def get_batches(ls, batch_size, shuffle=False):
    """
    Slice ``ls`` into consecutive batches of at most ``batch_size`` items.

    :param shuffle: whether to shuffle the order of the resulting batches
    :param ls: the list to slice
    :param batch_size: maximum number of items per batch
    :return: list of batches (each batch is a contiguous sub-list of ``ls``)
    """
    if not ls:
        # Guard: the previous end[-1] assignment raised IndexError on [].
        return []
    batches = [ls[s:s + batch_size] for s in range(0, len(ls), batch_size)]
    if shuffle:
        import random
        # random.shuffle() shuffles in place and returns None, so shuffle
        # first and then return the list (previously None was returned here).
        random.shuffle(batches)
    return batches


def traverse_dir(path, tails, file_info_ls=None):
    """
    Recursively collect files under ``path`` whose extension is in ``tails``.

    :param path: root directory to walk
    :param tails: allowed file extensions (without the dot); ``None``
        accepts every file
    :param file_info_ls: accumulator set shared across the recursion
    :return: set of ``(cleaned_file_path, file_name_without_extension)``
        tuples, e.g. {(path1, name1), (path2, name2), ...}
    Note: on Windows very deep paths can become invisible to Python; the
    ``clean_path`` call applies the long-path workaround.
    Reference: https://blog.csdn.net/baidu_34928905/article/details/131935428
    """
    if file_info_ls is None:
        file_info_ls = set()
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            # Descend into sub-directories, sharing the same accumulator.
            traverse_dir(entry_path, tails, file_info_ls)
            continue
        name_parts = os.path.basename(entry_path).split('.')
        extension = name_parts[-1]
        # '~$...' files are Office lock/temp files and are skipped.
        is_office_temp = os.path.basename(entry_path).startswith('~$')
        if (tails is None or extension in tails) and not is_office_temp:
            stem = '.'.join(name_parts[:-1])
            file_info_ls.add((clean_path(entry_path), stem))
    return file_info_ls


def clean_path(path):
    """
    Normalise a file path for the current platform and, on Windows, apply
    the long-path prefix to work around the 260-character limit.

    Args:
        path (str): input path, possibly mixing '/' and '\\' separators or
            containing relative '..' components.

    Returns:
        str: path formatted for the current operating system.
    """
    # Normalise both separator styles to the platform separator.
    path = path.replace('/', os.sep).replace('\\', os.sep)

    # Windows only: prepend the '\\?\' long-path prefix (unless already
    # present), folding any '..' components into the current directory.
    if os.sep == '\\' and '\\\\?\\' not in path:
        parts = path.split(os.sep)
        # Number of parent-directory ('..') components to resolve.
        up_levels = sum(1 for part in parts if part == '..')
        # Absolute paths (containing a drive ':') ignore the cwd entirely.
        base = os.getcwd().split(os.sep) if ':' not in path else []
        tail = [part for part in parts if part != ''][up_levels:]
        path = '\\\\?\\' + os.sep.join(base[:len(base) - up_levels] + tail)

    return path


def to_batches(ls, batch_size, shuffle=False):
    """
    Slice ``ls`` into consecutive batches of at most ``batch_size`` items.

    (Duplicate of ``get_batches``; kept because both names have callers.)

    :param shuffle: whether to shuffle the order of the resulting batches
    :param ls: the list to slice
    :param batch_size: maximum number of items per batch
    :return: list of batches (each batch is a contiguous sub-list of ``ls``)
    """
    if not ls:
        # Guard: the previous end[-1] assignment raised IndexError on [].
        return []
    batches = [ls[s:s + batch_size] for s in range(0, len(ls), batch_size)]
    if shuffle:
        import random
        # random.shuffle() shuffles in place and returns None, so shuffle
        # first and then return the list (previously None was returned here).
        random.shuffle(batches)
    return batches


def get_bge_embedding(texts: list[str], timeout: float = 60) -> list:
    """
    Fetch BGE dense embeddings for ``texts`` from the embedding service.

    :param texts: strings to embed
    :param timeout: request timeout in seconds (without one the call could
        hang forever on a stuck service)
    :return: list of embedding vectors, one per input text
        (the previous ``np.ndarray`` annotation was wrong — the service
        response is plain JSON lists)
    """
    res = requests.post(url="http://172.16.3.123:7771/embeddings/",
                        data=json.dumps(texts), timeout=timeout)
    # NOTE(review): no HTTP status check; a non-200 response surfaces as a
    # JSON/KeyError below — consider res.raise_for_status().
    embeddings = json.loads(res.content.decode("utf-8"))
    return embeddings["embeddings"]


def get_zpoint_embedding(texts: list[str], timeout: float = 60) -> np.ndarray:
    """
    Fetch zpoint dense embeddings for ``texts`` from the embedding service.

    :param texts: strings to embed
    :param timeout: request timeout in seconds (without one the call could
        hang forever on a stuck service)
    :return: float32 array of shape (len(texts), dim)
    """
    res = requests.post(url="http://172.16.3.123:7777/embeddings/",
                        data=json.dumps(texts), timeout=timeout)
    embeddings = json.loads(res.content.decode("utf-8"))["embeddings"]
    result = np.array(embeddings, dtype='float32')
    return result


def get_embedding_similar(text_pairs: list[list[str]], timeout: float = 60) -> np.ndarray:
    """
    Score text pairs with the similarity/rerank service.

    :param text_pairs: list of [text_a, text_b] pairs (the previous
        annotation ``list[list[str, str]]`` is not valid typing syntax)
    :param timeout: request timeout in seconds (without one the call could
        hang forever on a stuck service)
    :return: float32 array of similarity scores/embeddings from the service
    """
    res = requests.post(url="http://172.16.2.113:7772/embeddings/",
                        data=json.dumps(text_pairs), timeout=timeout)
    embeddings = json.loads(res.content.decode("utf-8"))["embeddings"]
    result = np.array(embeddings, dtype='float32')
    return result


def _split_text_with_regex_from_end(
        text: str, separator: str, keep_separator: bool
) -> List[str]:
    # Now that we have the separator, split the text
    if separator:
        if keep_separator:
            # The parentheses in the pattern keep the delimiters in the result.
            _splits = re.split(f"({separator})", text)
            splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
            if len(_splits) % 2 == 1:
                splits += _splits[-1:]
            # splits = [_splits[0]] + splits
        else:
            splits = re.split(separator, text)
    else:
        splits = list(text)
    return [s for s in splits if s != ""]


class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter):
    """Recursive text splitter tuned for Chinese punctuation.

    Extends langchain's RecursiveCharacterTextSplitter with default
    separators that try paragraph breaks first, then Chinese/English
    sentence endings, then clause punctuation, and post-processes the
    resulting chunks by collapsing blank-line runs.
    """

    def __init__(
            self,
            separators: Optional[List[str]] = None,
            keep_separator: bool = True,
            is_separator_regex: bool = True,
            **kwargs: Any,
    ) -> None:
        """Create a new TextSplitter.

        :param separators: ordered separator patterns, tried coarsest
            first; defaults cover paragraph, line, and Chinese/English
            sentence and clause boundaries.
        :param keep_separator: keep each separator attached to the chunk
            preceding it (forwarded to the base class).
        :param is_separator_regex: treat separators as regex patterns
            rather than literal strings.
        """
        super().__init__(keep_separator=keep_separator, **kwargs)
        self._separators = separators or [
            "\n\n",
            "\n",
            "。|！|？",
            "\.\s|\!\s|\?\s",
            "；|;\s",
            "，|,\s"
        ]
        self._is_separator_regex = is_separator_regex

    def _split_text(self, text: str, separators: List[str]) -> List[str]:
        """Split incoming text and return chunks."""
        final_chunks = []
        # Pick the first separator that actually occurs in the text; the
        # remaining (finer) separators are kept for recursive splitting of
        # pieces that are still too long.
        separator = separators[-1]
        new_separators = []
        for i, _s in enumerate(separators):
            _separator = _s if self._is_separator_regex else re.escape(_s)
            if _s == "":
                separator = _s
                break
            if re.search(_separator, text):
                separator = _s
                new_separators = separators[i + 1:]
                break

        _separator = separator if self._is_separator_regex else re.escape(separator)
        splits = _split_text_with_regex_from_end(text, _separator, self._keep_separator)

        # Now go merging things, recursively splitting longer texts.
        _good_splits = []
        _separator = "" if self._keep_separator else separator
        for s in splits:
            if self._length_function(s) < self._chunk_size:
                _good_splits.append(s)
            else:
                # Flush the accumulated short pieces first so the output
                # preserves the original text order.
                if _good_splits:
                    merged_text = self._merge_splits(_good_splits, _separator)
                    final_chunks.extend(merged_text)
                    _good_splits = []
                if not new_separators:
                    # No finer separator left: emit the long piece as-is.
                    final_chunks.append(s)
                else:
                    other_info = self._split_text(s, new_separators)
                    final_chunks.extend(other_info)
        if _good_splits:
            merged_text = self._merge_splits(_good_splits, _separator)
            final_chunks.extend(merged_text)
        # Collapse blank-line runs and drop empty chunks.
        return [re.sub(r"\n{2,}", "\n", chunk.strip()) for chunk in final_chunks if chunk.strip() != ""]


def rmb_trans(rmb_amount):
    """
    Convert a Chinese uppercase currency string to an Arabic-numeral amount.

    Example input: "壹亿贰仟叁佰万肆仟伍佰陆拾元柒角捌分".

    参数:
        rmb_amount (str): Chinese uppercase currency string.

    返回:
        the converted amount (float when 角/分 are present, otherwise int).
    """
    # Uppercase Chinese digits -> values.
    digits = {'零': 0, '壹': 1, '贰': 2, '叁': 3, '肆': 4, '伍': 5,
              '陆': 6, '柒': 7, '捌': 8, '玖': 9}
    # Unit characters -> multipliers.
    units = {'分': 0.01, '角': 0.1, '元': 1, '拾': 10, '佰': 100, '仟': 1000, '圆': 1}

    def section_value(segment):
        """Value of the piece preceding 亿/万: digit*unit for the first
        occurrence of each unit, plus a bare trailing digit (the implicit
        units position)."""
        value = 0
        for unit in units:
            if unit in segment:
                value += digits[segment[segment.index(unit) - 1]] * units[unit]
        if segment[-1] in digits:
            value += digits[segment[-1]]
        return value

    total = 0

    # Handle the 亿 (hundred-million) section, if any.
    if '亿' in rmb_amount:
        yi_part = re.match(r'(.+)亿.*', rmb_amount).group(1)
        total += section_value(yi_part) * 100000000
        rmb_amount = re.sub(r'.+亿', '', rmb_amount, count=1)

    # Handle the 万 (ten-thousand) section, if any.
    if '万' in rmb_amount:
        wan_part = re.match(r'(.+)万.*', rmb_amount).group(1)
        total += section_value(wan_part) * 10000
        rmb_amount = re.sub(r'.+万', '', rmb_amount, count=1)

    # Remainder: 元 and below.  Unlike the 亿/万 sections, the character in
    # front of a unit is only counted when it really is a digit.
    yuan_value = 0
    for unit in units:
        if unit in rmb_amount:
            prefix = rmb_amount[rmb_amount.index(unit) - 1]
            if prefix in digits:
                yuan_value += digits[prefix] * units[unit]
    total += yuan_value

    return total


def clean_content(content):
    """Lower-case ``content`` and strip common separator characters
    (- _ / : . # = ? @) so tokens normalise for matching."""
    return re.sub('[\\-_/:.#=?@]', '', content.lower())


def get_costumed_features(content_ls):
    """
    Extract hand-crafted retrieval features from each content string.

    For every entry this collects (a) alphanumeric tokens of length >= 3
    taken from the cleaned, lower-cased content, minus a small stop list,
    and (b) any Chinese uppercase currency amounts converted to their
    numeric form via ``rmb_trans``.

    Parameters:
        content_ls (list of str): contents to process.

    Returns:
        list of str: newline-joined feature strings, one per input entry
        ('' when nothing was found).
    """
    # Matches Chinese uppercase currency notation, e.g. 壹仟贰佰元.
    currency_pattern = (r"[壹贰叁肆伍陆柒捌玖拾佰仟]"
                        r"[壹贰叁肆伍陆柒捌玖拾佰仟元角万分百整零]+"
                        r"[壹贰叁肆伍陆柒捌玖拾佰仟元角万分百整零]")
    stop_words = {'metaindex', 'unnamed'}

    features = []
    for content in content_ls:
        # Currency amounts found in the raw content, converted to strings.
        amounts = {str(rmb_trans(m)) for m in re.findall(currency_pattern, content)}

        # Alphanumeric tokens (length >= 3) from the cleaned content.
        tokens = set(re.findall('[a-z0-9]{3,}', clean_content(content)))
        tokens -= stop_words
        tokens |= amounts

        features.append('\n'.join(tokens) if tokens else '')
    return features


########################## Functions related to main.py live below ###########################
def make_dir():
    """
    (Re)create the working directory layout.

    './dataset' is wiped and rebuilt with one sub-directory per supported
    file type; the './out' scratch directories are created if missing.
    """
    dataset_dir = './dataset'
    if os.path.exists(dataset_dir):
        # Start from a clean slate; shutil is already imported at module
        # level, so the previous function-local re-import was redundant.
        shutil.rmtree(dataset_dir)
    for sub in ('doc_files', 'docx_files', 'xls_files', 'xlsx_files',
                'pdf_files', 'ofd_files', 'xml_files', 'img_files'):
        os.makedirs(os.path.join(dataset_dir, sub))

    for path in ('./out/parse_split/parse_out',
                 'out/parse_split/split_out',
                 'out/ofd_unziped'):
        # exist_ok avoids the racy exists()-then-makedirs pattern.
        os.makedirs(path, exist_ok=True)


def classify_files(data_dir, classify_to_path, is_copy_xml=True):
    """
    Copy every supported file under ``data_dir`` into per-type folders
    below ``classify_to_path`` (pdf/docx/doc/xls/xlsx/xml/ofd/images),
    which makes test data easier to inspect.

    :param is_copy_xml: whether xml files are copied as well; ofd handling
        already parses its xml, so that xml does not need to be kept
    :param data_dir: dataset root to scan
    :param classify_to_path: destination root for the classified copies
    """
    import shutil
    image_types = ('jpg', 'jpeg', 'png', 'jfif', 'exif', 'gif', 'tiff', 'bmp')
    allowed = {'doc', 'DOC', 'docx', 'DOCX', 'pdf', 'PDF',
               'xls', 'XLS', 'xlsx', 'XLSX',
               'ofd', 'OFD', 'XML', 'xml', *image_types}

    for idx, (src_path, stem) in enumerate(traverse_dir(data_dir, allowed)):
        suffix = src_path.split('.')[-1]
        lowered = suffix.lower()
        # traverse_dir only yields the extensions listed in ``allowed``,
        # so lower-casing is a safe normalisation here.
        if lowered in ('doc', 'docx', 'pdf', 'xls', 'xlsx', 'ofd'):
            dest = f'{classify_to_path}/{lowered}_files/{stem}.{lowered}'
        elif lowered == 'xml' and is_copy_xml:
            dest = f'{classify_to_path}/xml_files/{stem}.xml'
        elif suffix in image_types:
            dest = f'{classify_to_path}/img_files/{stem}.{suffix}'
        else:
            continue

        if os.path.exists(dest):
            # Name clash: disambiguate with the enumeration index.
            parent = '/'.join(dest.split('/')[:-1])
            dest = f'{parent}/{stem}_{idx}.{suffix}'
        shutil.copyfile(src_path, dest)


def unzip_files(path_all):
    """
    Extract every zip archive found (recursively) under ``path_all``.

    Each archive is extracted next to itself, into a directory named after
    the archive without its '.zip' suffix.

    :param path_all: directory tree that may contain zip files
    """
    import zipfile
    for archive_path, _ in traverse_dir(path_all, ['zip', 'ZIP']):
        print("正在解压：%s" % archive_path)
        unzip_path = archive_path[:-4]  # strip the '.zip' suffix
        # ``with`` guarantees the handle is closed even when the archive is
        # empty or extraction raises (previously it leaked in both cases).
        # metadata_encoding (Python 3.11+) decodes legacy GBK member names.
        with zipfile.ZipFile(str(archive_path), 'r', metadata_encoding='gbk') as zip_file:
            for member in zip_file.namelist():
                # NOTE(review): zip member names use '/', so this '\\'
                # check looks like dead code kept from the original —
                # confirm before removing.
                if member.endswith('\\') and os.path.exists(member):
                    shutil.rmtree(member)
                zip_file.extract(member, unzip_path)


def parse_ofd_files(data_dir, out_dir, ofd_unzip_path, block_max_len):
    """Parse OFD files, then unzip any archives left in the scratch dir.

    :param data_dir: directory containing the ofd files to parse
    :param out_dir: directory the parsed output is written to
    :param ofd_unzip_path: scratch directory used to unpack ofd containers
    :param block_max_len: maximum block length forwarded to the ofd parser
    """
    from file_paser.ofd_paser.ofd_parse import parse_ofd
    parse_ofd(data_dir, out_dir, ofd_unzip_path, block_max_len)
    unzip_files(ofd_unzip_path)


# File format conversion
def file_conversion(data_dir):
    """
    Convert files in a directory to updated formats:
    1. Convert .doc files to .docx format.
    2. Convert .pdf files to .docx format.
    3. Convert .xls files to .xlsx format.

    Parameters:
        data_dir (str): Path to the directory containing files to be converted.
    """

    # Convert .doc files to .docx
    doc_files = os.path.join(data_dir, 'doc_files')
    iter_files = traverse_dir(doc_files, ['doc'])  # Locate .doc files
    if len(iter_files) > 0:
        doc2docx_files = os.path.join(data_dir, 'doc2docx_files')
        if not os.path.exists(doc2docx_files):
            os.makedirs(doc2docx_files)
        # Lazy import: the converter is only needed when .doc files exist.
        from office import word
        word.doc2docx(doc_files, doc2docx_files)  # Convert .doc to .docx

    # Convert .pdf files to .docx
    pdf_files = os.path.join(data_dir, 'pdf_files')
    iter_files = traverse_dir(pdf_files, ['pdf'])  # Locate .pdf files
    if len(iter_files) > 0:
        pdf2docx_files = os.path.join(data_dir, 'pdf2docx_files')
        if not os.path.exists(pdf2docx_files):
            os.makedirs(pdf2docx_files)
        # Lazy import: pdf2docx is only needed when .pdf files exist.
        from pdf2docx import parse as pdf_parse
        for file, file_name in iter_files:
            name = file_name + '.docx'
            pdf_parse(file, os.path.join(pdf2docx_files, name))  # Convert each .pdf to .docx

    # Convert .xls files to .xlsx
    xls_files = os.path.join(data_dir, 'xls_files')
    iter_files = traverse_dir(xls_files, ['xls'])  # Locate .xls files
    if len(iter_files) > 0:
        xls2xlsx_files = os.path.join(data_dir, 'xls2xlsx_files')
        if not os.path.exists(xls2xlsx_files):
            os.makedirs(xls2xlsx_files)
        # Lazy import: xls2xlsx is only needed when .xls files exist.
        from xls2xlsx import XLS2XLSX
        for file, file_name in iter_files:
            name = file_name + '.xlsx'
            x2x = XLS2XLSX(file)
            x2x.to_xlsx(os.path.join(xls2xlsx_files, name))  # Convert each .xls to .xlsx


def parse_files(data_dir, out_dir, unzip_dir, image_saved_dir, block_max_len):
    """
    Parse docx, ofd, xml and image content into ``out_dir``.  Assumes
    doc/pdf files were already converted to docx and xls to xlsx (see
    ``file_conversion``), since the converted directories are read here.

    参数:
    :param data_dir: directory of the files to parse
    :param out_dir: output directory for the parsed content
    :param unzip_dir: scratch directory for unpacking zip/ofd containers
    :param image_saved_dir: directory extracted images are saved to
    :param block_max_len: for ofd preprocessing
    """
    # Create the image output directory if it does not exist yet.
    if not os.path.exists(image_saved_dir):
        os.makedirs(image_saved_dir)

    # Import the docx/doc/pdf parsing helpers.
    from file_paser.docx_paser.docx_parse import parse_docx, parse_doc, parse_pdf

    # Per-type source directories.
    docx_files = os.path.join(data_dir, 'docx_files')
    if os.path.exists(docx_files):
        # Parse native docx files; text to out_dir, images to image_saved_dir.
        parse_docx(docx_files, out_dir, image_saved_dir)

    doc_files = os.path.join(data_dir, 'doc2docx_files')
    if os.path.exists(doc_files):
        # Parse doc files that were previously converted to docx.
        parse_doc(doc_files, out_dir, image_saved_dir)

    pdf_files = os.path.join(data_dir, 'pdf2docx_files')
    if os.path.exists(pdf_files):
        # Parse pdf files that were previously converted to docx.
        parse_pdf(pdf_files, out_dir, image_saved_dir)

    # Parse xml files.
    from file_paser.xml_paser.xml_parse import parse_xml
    parse_xml(data_dir, out_dir)

    # Parse ofd files (if any).
    parse_ofd_files(data_dir, out_dir, unzip_dir, block_max_len)

    # Parse image files.
    from file_paser.img_parser.img_parse import parse_img
    parse_img(data_dir, out_dir, image_saved_dir)


def split_content_with_media(content, table_info, image_info, spliter, split_size):
    """
    Split content that may embed ``<table:...>`` / ``<image:...>`` markers.

    :param content: text to split, containing body text and media tags
    :param table_info: dict mapping table ids to table payloads
    :param image_info: dict mapping image ids to {'content', 'path'} dicts
    :param spliter: text splitter used for plain-text pieces
    :param split_size: maximum size of a single chunk
    :return: (split_ls, img_path_ls) — chunk texts, and for each chunk the
             source image path ('' for non-image chunks)
    """
    split_ls, img_path_ls = [], []

    # Fast path: no media at all -> plain text splitting.
    if not (table_info or image_info):
        text_ls = filter_texts_and_images(spliter.split_text(content))
        return text_ls, [''] * len(text_ls)

    # Split on the media markers.  The previous character class
    # ``[tableimg]+`` also matched unintended tags such as <bat:...>; the
    # alternation matches exactly the two marker kinds emitted upstream.
    content_blocks = re.split(r'(<(?:table|image):[^>]+>)', content)

    for block in content_blocks:
        block = block.strip()
        if not block:
            continue  # skip empty fragments

        # Identify table / image marker blocks.
        table_key = re.findall(r'<table:([^>]+)>', block)
        image_key = re.findall(r'<image:([^>]+)>', block)

        if table_key:
            table_dic = table_info.get(table_key[0])
            if table_dic:
                # Tables are chunked by get_md_tables; cells too large for
                # one chunk come back separately and are split as text.
                sub_tables, exceed_cell_ls = get_md_tables(table_dic, split_size)
                split_ls.extend(sub_tables)
                img_path_ls.extend([''] * len(sub_tables))

                if exceed_cell_ls:
                    exceed_text_ls = spliter.split_text('\n'.join(exceed_cell_ls))
                    split_ls.extend(exceed_text_ls)
                    img_path_ls.extend([''] * len(exceed_text_ls))

        elif image_key:
            image_data = image_info.get(image_key[0])
            if image_data:
                image_content, image_path = image_data['content'].strip(), image_data['path']
                text_ls = spliter.split_text(image_content)
                split_ls.extend(text_ls)
                # Every chunk derived from this image carries its path.
                img_path_ls.extend([image_path] * len(text_ls))

        else:
            # Plain text between markers.
            text_ls = filter_texts_and_images(spliter.split_text(block))
            split_ls.extend(text_ls)
            img_path_ls.extend([''] * len(text_ls))

    return split_ls, img_path_ls


# 将文件名和文件内容向量化
def prepare_dense_vectors(name_index, text_ls, batch_size, description=''):
    """
    Compute dense vectors for chunk texts and their file names.

    参数:
    :param name_index: list of [file_name, suffix, start, end] entries; the
        file-name vector is repeated for every chunk in [start, end)
    :param text_ls: list of chunk texts
    :param batch_size: number of texts embedded per request
    :param description: label shown on the progress bars

    返回:
    :return: (text_embeddings, name_embeddings)
    """
    # One embedding per file name, repeated across its chunk range so the
    # result lines up 1:1 with text_ls.
    name_embeddings = []
    progress = tqdm(name_index)
    for file_name, _, start, end in progress:
        vector = get_bge_embedding([file_name])[0]
        name_embeddings.extend([vector] * (end - start))
        progress.set_description(f'{description}-name')

    # Chunk texts are embedded in batches to bound the request size.
    text_embeddings = []
    progress = tqdm(to_batches(text_ls, batch_size))
    for batch in progress:
        text_embeddings.extend(get_bge_embedding(batch))
        progress.set_description(f'{description}-text')

    return text_embeddings, name_embeddings


def re_filter(content_str):
    """
    Normalise whitespace: collapse runs of tabs/CRs/spaces to one space and
    runs of newlines to one newline, then strip the ends.

    :param content_str: raw text
    :return: the normalised text
    """
    collapsed = re.sub('[\\t\\r ]{2,}', r' ', content_str)
    collapsed = re.sub('\n{2,}', '\n', collapsed)
    return collapsed.strip()


def split_xlsx_xls(rc_splitter, db_data, name_index, split_size):
    """
    Parse and chunk xlsx/xls content into ``db_data``.  Note: xls files
    must already have been converted to xlsx (see ``file_conversion``).

    参数：
        rc_splitter: text splitter used for over-long cell content.
        db_data: dict of parallel lists that will be inserted into the DB.
        name_index: records [name, suffix, start, end] chunk ranges per
            file so duplicated names are vectorised only once.
        split_size: int, maximum chunk length for table splitting.
    """

    def parse_xlsx_content(files, tail):
        # Starting index of the rows this call appends to db_data.
        temp_index = len(db_data['image_path'])

        for file_id, (file, file_name) in enumerate(files):
            # Read every worksheet of the Excel file.
            sheets = pd.read_excel(file, sheet_name=None).items()

            for sheet_name, df in sheets:
                fs_name = f'{file_name}-{sheet_name}'

                # Chunk the sheet into markdown tables plus any cell
                # content too large to fit a chunk.
                table_ls, extra_data_ls = prepare_excel_table(df, max_len=split_size)
                len_table_ls = len(table_ls)

                # Append the table chunks to db_data (parallel lists).
                db_data['text'].extend(table_ls)
                db_data['added_value'].extend(get_costumed_features(table_ls))
                db_data['image_path'].extend([''] * len_table_ls)
                db_data['name'].extend([fs_name] * len_table_ls)
                db_data['type'].extend([tail] * len_table_ls)

                # Over-long cell content is split as plain text.
                if extra_data_ls:
                    for extra_data in extra_data_ls:
                        split_texts = rc_splitter.split_text(extra_data)
                        db_data['text'].extend(split_texts)
                        db_data['added_value'].extend(get_costumed_features(split_texts))
                        db_data['image_path'].extend([''] * len(split_texts))
                        db_data['name'].extend([fs_name] * len(split_texts))
                        db_data['type'].extend([tail] * len(split_texts))

                # Record the chunk range this sheet occupies in db_data.
                # NOTE(review): the suffix recorded here is always '.xlsx'
                # even when tail is '.xls' (tail only feeds db_data['type'])
                # — confirm this is intentional.
                text_len = len(db_data['text'])
                if text_len > temp_index:
                    name_index.append([fs_name, '.xlsx', temp_index, text_len])
                temp_index = text_len

    # Native xlsx files plus xls files converted to xlsx.
    xlsx_files = traverse_dir('dataset/xlsx_files', ['xlsx'])
    xls_files = traverse_dir('dataset/xls2xlsx_files', ['xlsx'])

    # Parse and store the xlsx content.
    parse_xlsx_content(xlsx_files, tail='.xlsx')
    # Parse and store the (converted) xls content.
    parse_xlsx_content(xls_files, tail='.xls')


def filter_texts_and_images(text_list):
    """
    Drop entries that contain nothing but alphanumeric characters/spaces.

    Each entry has every run of ``[0-9a-zA-Z ]`` removed (matched up to 3
    characters at a time); an entry is kept — unmodified — only when
    something else (e.g. CJK text or punctuation) remains.

    Note: the previous docstring claimed a (texts, image_paths) tuple was
    returned; only the filtered text list ever was.

    Args:
        text_list (list of str): text entries to be filtered.

    Returns:
        list of str: the entries that contain non-alphanumeric content.
    """
    pattern = re.compile(r'[0-9a-zA-Z ]{1,3}')
    filtered_texts = []
    for text in text_list:  # the unused enumerate index was removed
        # Keep the original text when stripping alnum/space leaves residue.
        if pattern.sub('', text):
            filtered_texts.append(text)
    return filtered_texts


def split_docx_doc_pdf_xml_ofd_img(parse_data, ofd_xlm_spliter, rc_splitter, db_data, name_index, split_size):
    """
    Chunk parsed docx, doc, xml, ofd and img content into ``db_data``.
    Note: doc files must already have been converted to docx.

    :param parse_data: directory holding the parsed-file json output
    :param ofd_xlm_spliter: text splitter used for xml and ofd content
    :param rc_splitter: text splitter used for doc/docx content
    :param db_data: dict of parallel lists that will be inserted into the DB
    :param name_index: records file name, suffix and the chunk range each
        file occupies in db_data
    :param split_size: maximum chunk length
    """
    # Current number of rows in db_data; start of the ranges appended here.
    temp_index = len(db_data['image_path'])
    # Every parsed file was dumped as a json file under parse_data.
    iter_file = traverse_dir(parse_data, ['json'])
    for json_path, _ in iter_file:
        # Load one parsed-file record.
        with open(json_path, 'r', encoding='utf-8') as file:
            data_dict = json.load(file)  # parsed json payload
            file_suffix = data_dict['file_suffix']  # original file suffix
            content_ls = data_dict['file_list']  # per-file content entries

            # xml/ofd content uses the newline-oriented splitter; the rest
            # uses the recursive Chinese splitter.
            spliter = ofd_xlm_spliter if file_suffix == '.xml' or file_suffix == '.ofd' else rc_splitter

            # Process each content entry of the file.
            for content_dict in content_ls:
                content = content_dict['content']  # body text
                image_info = content_dict['image_info']  # embedded images
                table_info = content_dict['table_info']  # embedded tables

                # Media-aware splitting when tables or images are present.
                if table_info or image_info:
                    sub_content_ls, img_str_ls = split_content_with_media(
                        content, table_info, image_info, spliter, split_size
                    )
                else:
                    # Plain text: split and drop purely-alphanumeric noise.
                    sub_content_ls = filter_texts_and_images(spliter.split_text(content))
                    img_str_ls = [''] * len(sub_content_ls)  # align with text list

                len_sub_content_ls = len(sub_content_ls)  # number of chunks
                # Append chunks to the parallel lists of db_data.
                db_data['added_value'].extend(get_costumed_features(sub_content_ls))
                db_data['text'].extend(sub_content_ls)  # chunk texts
                db_data['image_path'].extend(img_str_ls)  # per-chunk image path
                db_data['name'].extend([content_dict['file_name']] * len_sub_content_ls)  # file name
                db_data['type'].extend([file_suffix] * len_sub_content_ls)  # file type

                text_len = len(db_data['text'])  # total rows so far
                # Record the range this entry occupies (skip empty ranges).
                if text_len - temp_index > 0:
                    name_index.append([content_dict['file_name'], file_suffix, temp_index, text_len])
                temp_index = text_len  # next range starts here


def split_name_index(batch_end, name_index):
    """
    Cut ``name_index`` at ``batch_end`` for batched vectorisation.

    Entries are [file_name, suffix, start, end) chunk ranges; because one
    file can span many chunks, the same name only needs to be vectorised
    once per range.  An entry straddling ``batch_end`` is split: the head
    (up to ``batch_end``) is returned with the entries before it, and the
    tail is pushed back to the front of ``name_index`` (mutated in place)
    for the next batch.  If nothing crosses the boundary — i.e. the final
    batch — ``name_index`` is left untouched.

    :param batch_end: exclusive end index of the current batch
    :param name_index: remaining [name, suffix, start, end] entries
    :return: the entries covering the current batch
    """
    current = []
    for name, suffix, start, end in name_index:
        if end <= batch_end:
            # Entirely inside this batch.
            current.append([name, suffix, start, end])
            continue
        # Straddles the boundary: keep the head, requeue the tail.
        current.append([name, suffix, start, batch_end])
        del name_index[:len(current)]
        name_index.insert(0, [name, suffix, batch_end, end])
        break
    return current


def get_batche_index(ls_len, batch_size):
    """
    Build (start, end) index pairs for batched iteration, e.g.
    ls_len=99500, batch_size=1000 ->
    [(0, 1000), (1000, 2000), ..., (99000, 99500)]

    :param ls_len: total number of items
    :param batch_size: maximum batch size
    :return: list of (start, end) tuples covering [0, ls_len)
    """
    if ls_len <= 0:
        # Guard: the previous end[-1] assignment raised IndexError here.
        return []
    return [(start, min(start + batch_size, ls_len))
            for start in range(0, ls_len, batch_size)]


def save_zh_only(string_):
    """
    Reduce ``string_`` to Chinese characters plus full-width punctuation.

    '无|' placeholders are dropped, '|', newlines and spaces become '，',
    everything outside the CJK/full-width ranges is removed, and runs of
    '，' collapse to one.

    Args:
        string_: input text
    Returns:
        the reduced text, prefixed with a newline
    """
    no_placeholder = re.sub(r"无\|", '', string_)
    commas = re.sub('[|\n ]', '，', no_placeholder)
    zh_only = re.sub(r"[^\u4e00-\u9fa5\uff00-\uffef]+", '', commas)
    return '\n' + re.sub(r"，{2,}", '，', zh_only)


# milvus专用
def prepare_sparse_vectors(text_ls, name_ls, out_dir):
    """
    Build BM25 sparse vectors for chunk texts and file names.
    注：只用于Milvus向量数据库 (Milvus-only).

    Args:
        text_ls: chunk text list
        name_ls: file-name list
        out_dir: directory under which the fitted BM25 embedding functions
            (EmbeddingFunction, emf) are saved

    Returns:
        (text sparse vectors, name sparse vectors)
    """
    # Prefix every text so completely empty entries still tokenise.
    text_ls = ['NAN，' + text for text in text_ls]
    print('Making sparse vectors ..')

    # Where the fitted BM25 functions are persisted.
    bm25_emf_path = os.path.join(out_dir, 'bm25_emf_path')
    os.makedirs(bm25_emf_path, exist_ok=True)

    from milvus_model.sparse import BM25EmbeddingFunction
    from milvus_model.sparse.bm25.tokenizers import build_default_analyzer

    # Chinese analyzer (en / zh / de / jp are supported upstream).
    analyzer = build_default_analyzer(language="zh")

    def fit_encode_save(corpus, save_name):
        """Fit BM25 on ``corpus``, encode it, and persist the fitted function."""
        embedding_fn = BM25EmbeddingFunction(analyzer)
        embedding_fn.fit(corpus)
        encoded = embedding_fn.encode_documents(corpus)
        embedding_fn.save(os.path.join(bm25_emf_path, save_name))
        return encoded

    text_sparse = fit_encode_save(text_ls, "text_bm25_ef.json")
    name_sparse = fit_encode_save(name_ls, "name_bm25_ef.json")

    print('Completed. ')
    return text_sparse, name_sparse


def split_to_chunk(parse_data_dir, out_data_dir, split_size):
    """
    Chunk every parsed file and dump the results as JSON.

    Writes ``segment.json`` (the chunk records) and ``name_index.json``
    (per-file chunk ranges, so duplicated file names are only vectorised
    once) into ``out_data_dir``.

    :param parse_data_dir: directory holding the parsed-file json output
    :param out_data_dir: directory the chunk json files are written to
    :param split_size: maximum chunk size
    """
    text_splitter = ChineseRecursiveTextSplitter(
        separators=["。|！|？", "；|;\s", "\n\n", "\n", "，|,\s", "\!\s|\?\s", ' ', ''],
        keep_separator=True,
        is_separator_regex=True,
        chunk_size=split_size,
        chunk_overlap=0
    )
    xml_splitter = ChineseRecursiveTextSplitter(
        separators=['\n\n\n\n', '\n\n\n', '\n\n', '\n', "。|！|？", ' ', ''],
        chunk_size=split_size,
        chunk_overlap=0
    )

    db_data = {'text': [], 'name': [], 'added_value': [], 'image_path': [], 'type': []}
    # A file may yield many chunks; name_index records the ranges so each
    # file name only has to be vectorised once.
    name_index = []

    # doc/docx/pdf/xml/ofd/img content.
    split_docx_doc_pdf_xml_ofd_img(parse_data_dir, xml_splitter, text_splitter,
                                   db_data, name_index, split_size)
    # xls/xlsx content.
    split_xlsx_xls(text_splitter, db_data, name_index, split_size)

    for file_name, payload in (('segment.json', db_data), ('name_index.json', name_index)):
        with open(os.path.join(out_data_dir, file_name), 'w+', encoding='utf-8') as f:
            json.dump(payload, f, ensure_ascii=False)


def prepare_vectors(out_dir, split_size, store_batch_size):
    """
    Load the chunked data, vectorise it, and persist the results in
    batches under ``out_dir``:

    1. load the chunk records produced by ``split_to_chunk``;
    2. vectorise chunk contents and file names (sparse + dense);
    3. pickle the combined records in store_batch_size slices.

    :param store_batch_size: records per output pickle, to bound memory use
    :param out_dir: directory the vectors are read from / written to
    :param split_size: chunk size (only used in the output file names)
    :return: None
    """
    # Paths of the segment.json and name_index.json inputs.
    segments_path = os.path.join(out_dir, 'segment.json')
    name_index_path = os.path.join(out_dir, 'name_index.json')

    # Load the chunk records into db_data.
    with open(segments_path, 'r', encoding='utf-8') as file:
        db_data = json.load(file)

    # Load the per-file chunk-range index.
    with open(name_index_path, 'r', encoding='utf-8') as file:
        name_index = json.load(file)

    # Parallel lists: chunk texts, file names and extracted features.
    text_ls, name_ls, added_ls = db_data['text'], db_data['name'], db_data['added_value']

    # BM25 sparse vectors for all texts and names (fitted once, up front).
    text_sparse_vecs, name_sparse_vecs = prepare_sparse_vectors(text_ls, name_ls, out_dir)

    # (start, end) slices for batched storage.
    store_batches = get_batche_index(len(text_ls), store_batch_size)

    # Process one storage batch at a time to bound memory use.
    for batch_start, batch_end in store_batches:
        # Slice every parallel list down to the current batch.
        cur_text_ls = text_ls[batch_start:batch_end]
        cur_image_path = db_data['image_path'][batch_start:batch_end]
        cur_names = name_ls[batch_start:batch_end]
        cur_added_value = added_ls[batch_start:batch_end]
        cur_types = db_data['type'][batch_start:batch_end]
        cur_text_sparse_vec = text_sparse_vecs[batch_start:batch_end]
        cur_name_sparse_vec = name_sparse_vecs[batch_start:batch_end]

        # Consume the file-name index entries covered by this batch
        # (split_name_index mutates name_index in place).
        cur_file_name_index = split_name_index(batch_end, name_index)

        # Dense vectors for the batch's texts and file names.
        text_dense_vecs, name_dense_vecs = prepare_dense_vectors(
            cur_file_name_index,
            cur_text_ls,
            100,  # embedding request batch size (not a feature dimension)
            f'Making dense vectors-{batch_start}-{batch_end}'  # progress label
        )  # vector conversion

        # Assemble one record per chunk in this batch.
        cur_db_data = [
            {
                'text': cur_text_ls[i],  # chunk text
                'name': cur_names[i],  # file name
                'type': cur_types[i],  # file type / suffix
                'added_value': cur_added_value[i],  # extracted features
                'image_path': cur_image_path[i],  # source image path ('' if none)
                # [i:i+1] keeps a single-row sparse matrix rather than a scalar.
                'text_sparse_vectors': cur_text_sparse_vec[i:i + 1],  # sparse text vector
                'name_sparse_vectors': cur_name_sparse_vec[i:i + 1],  # sparse name vector
                'text_dense_vectors': text_dense_vecs[i],  # dense text vector
                'name_dense_vectors': name_dense_vecs[i],  # dense name vector
            }
            for i in range(len(cur_text_ls))  # every chunk in the batch
        ]

        # Output path encodes the chunk size and the batch range.
        prepared_data = os.path.join(out_dir, f'MaxLen_{split_size}_BGE_{batch_start}_{batch_end}.pkl')

        # Persist the batch.
        with open(prepared_data, 'wb') as f:
            pickle.dump(cur_db_data, f)
