"""处理输入文档，分块并准备向量存储。"""
import os
from datetime import datetime

from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.markdown import \
    UnstructuredMarkdownLoader
from langchain_text_splitters import MarkdownTextSplitter

from src.spark_edu_rag.rag_qa.document_loaders import (OCRDOCLoader,
                                                       OCRIMGLoader,
                                                       OCRPDFLoader,
                                                       OCRPPTLoader)
from src.spark_edu_rag.rag_qa.text_spliter import ChineseRecursiveTextSplitter
from src.spark_edu_rag.utils import config_ini, get_logger

# 配置日志
logger = get_logger(__name__)

# Maps file extensions to their loader classes.
# NOTE: keys must include the leading dot — lookups use
# os.path.splitext(path)[1], which always starts with ".".
document_loader = {
    ".txt": TextLoader,
    ".pdf": OCRPDFLoader,
    ".doc": OCRDOCLoader,
    ".ppt": OCRPPTLoader,
    ".pptx": OCRPPTLoader,  # was "pptx" (missing dot): .pptx files never matched
    ".png": OCRIMGLoader,
    ".jpg": OCRIMGLoader,
    ".md": UnstructuredMarkdownLoader,
}

def load_documents_recursively(directory: str):
    """
    Recursively load all supported documents (.txt, .pdf, .doc, .ppt,
    .pptx, .png, .jpg, .md) under *directory*.

    :param directory: root directory to walk
    :return: list of loaded documents, each with ``page_content`` and
        ``metadata``
    """
    documents = []
    # Derive the "student category" from the parent directory name,
    # e.g. ".../ai_data" -> "ai".  Currently computed but not attached to
    # metadata (assignment kept commented below) — presumably for later use.
    source_category = os.path.basename(os.path.dirname(directory)).replace("_data", "")
    for root, subdirs, files in os.walk(directory):
        logger.info(f"当前目录: {root}, 子目录: {subdirs}, 文件列表: {files}")
        for file in files:
            file_path = os.path.join(root, file)
            file_ext = os.path.splitext(file_path)[1].lower()
            if file_ext not in document_loader:
                # Use the module logger instead of print so unsupported
                # files show up in the application log.
                logger.warning(f"文件 {file_path} 扩展名 {file_ext} 不支持加载")
                continue
            try:
                # TextLoader needs an explicit encoding; the OCR loaders
                # take only the path.
                if file_ext == ".txt":
                    loader = document_loader[file_ext](file_path, encoding="utf-8")
                else:
                    loader = document_loader[file_ext](file_path)
                docs = loader.load()
                # Stamp every loaded page with the same timestamp.  The old
                # code indexed docs[0] only, which skipped the remaining
                # pages and raised IndexError when a loader returned [].
                create_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                for doc in docs:
                    doc.metadata["create_time"] = create_time
                    # doc.metadata["source_category"] = source_category
                documents.extend(docs)
            except Exception:
                # Best-effort loading: log (with traceback) and keep going
                # with the remaining files.
                logger.exception(f"加载文件 {file_path} 时出错")
    # Every returned document carries page_content plus metadata.
    return documents

# Hierarchically split documents into parent + child chunks; return the
# child chunks with parent metadata attached (for small-to-big retrieval).
def document_split_chunk(directory_path: str, parent_chunk_size: int = None, child_chunk_size: int = None, chunk_overlap: int = None):
    """
    Split documents loaded from *directory_path* into parent chunks and
    then into child chunks, returning the child chunks.  Each child chunk
    carries ``parent_id``, ``parent_content`` and ``child_id`` metadata so
    a retriever can map a matched child back to its full parent context.

    :param directory_path: directory whose documents are loaded and split
    :param parent_chunk_size: parent chunk size; defaults to config value
    :param child_chunk_size: child chunk size; defaults to config value
    :param chunk_overlap: overlap between chunks; defaults to config value
    :return: list of child chunks, each with page_content and metadata
    """
    documents = load_documents_recursively(directory_path)
    # Fall back to configured sizes when the caller did not supply any.
    if parent_chunk_size is None:
        parent_chunk_size = int(config_ini.DOC_CHUNK.PARENT_CHUNK_SIZE)
    if child_chunk_size is None:
        child_chunk_size = int(config_ini.DOC_CHUNK.CHILD_CHUNK_SIZE)
    if chunk_overlap is None:
        chunk_overlap = int(config_ini.DOC_CHUNK.CHUNK_OVERLAP)
    logger.info(f"parent_chunk_size: {parent_chunk_size}, child_chunk_size: {child_chunk_size}, chunk_overlap: {chunk_overlap}")
    # General-purpose Chinese splitters plus Markdown-aware splitters;
    # the right pair is chosen per document below.
    parent_spliter = ChineseRecursiveTextSplitter(chunk_size=parent_chunk_size, chunk_overlap=chunk_overlap)
    parent_markdown_splitter = MarkdownTextSplitter(chunk_size=parent_chunk_size, chunk_overlap=chunk_overlap)

    child_spliter = ChineseRecursiveTextSplitter(chunk_size=child_chunk_size, chunk_overlap=chunk_overlap)
    child_markdown_splitter = MarkdownTextSplitter(chunk_size=child_chunk_size, chunk_overlap=chunk_overlap)

    child_chunk_list = []
    for i, doc in enumerate(documents):
        # Guard against a missing "source" key (get() would return None
        # and os.path.splitext(None) raises TypeError).
        source = doc.metadata.get("source") or ""
        file_ext = os.path.splitext(source)[1].lower()
        is_markdown = file_ext in [".md", ".markdown"]
        # Pick the splitter pair matching the document type.
        use_parent_spliter = parent_markdown_splitter if is_markdown else parent_spliter
        use_child_splitter = child_markdown_splitter if is_markdown else child_spliter
        logger.info(f"当前文档: {doc.metadata.get('source')}, 父类切分器: {use_parent_spliter}，子类切分器: {use_child_splitter}")
        # First pass: split the document into large parent chunks.
        parent_chunks = use_parent_spliter.split_documents([doc])
        for k, parent_chunk in enumerate(parent_chunks):
            parent_id = f"doc_{i}_parent_{k}"
            parent_chunk.metadata["parent_id"] = parent_id
            parent_chunk.metadata["parent_content"] = parent_chunk.page_content
            # Second pass: split each parent chunk into small child chunks.
            child_chunks = use_child_splitter.split_documents([parent_chunk])
            for j, child_chunk in enumerate(child_chunks):
                child_chunk.metadata["parent_id"] = parent_id
                child_chunk.metadata["parent_content"] = parent_chunk.page_content
                # NOTE: id reads "parent_doc_..._child_j" because parent_id
                # already starts with "doc_"; format kept for compatibility.
                child_chunk.metadata["child_id"] = f"parent_{parent_id}_child_{j}"
                child_chunk_list.append(child_chunk)

    # len(documents), not i+1: the old code raised NameError here when no
    # documents were loaded (loop never ran, i unbound).
    logger.info(f"共处理 {len(documents)} 个文档，生成 {len(child_chunk_list)} 个子块")
    return child_chunk_list





if __name__ == '__main__':
    # Ad-hoc smoke run: chunk the AI course material directory and dump
    # the resulting child chunks to stdout.
    data_dir = r"D:\WorkArea\WorkSpace\Python\spark-edu-rag\src\spark_edu_rag\resources\data\ai_data"
    chunks = document_split_chunk(data_dir)
    print(chunks)
