#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@File    ：pipline.py
@Author  ：平
@Date    ：2025/9/28 14:23 
"""
from enum import Enum
from pathlib import Path

from langchain_community.document_loaders import PyPDFLoader, TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter

from app.config.config import settings
import logging

logger = logging.getLogger(__name__)


class Pipline:
    """Document-processing pipeline: load a file, split it into chunks,
    and stamp caller-supplied metadata onto every chunk.

    Typical fluent usage::

        docs, chunks = Pipline(path, Pipline.FileSuffix.PDF).load().split().get()
    """

    class FileSuffix(Enum):
        # Supported input file extensions.
        PDF = ".pdf"
        TXT = ".txt"

    # Maps a file suffix to the langchain loader class able to read it.
    LOADER_MAP = {
        FileSuffix.PDF: PyPDFLoader,
        FileSuffix.TXT: TextLoader,
    }

    def __init__(self, file_path: str | Path, file_suffix: FileSuffix, metadata: dict | None = None):
        """
        :param file_path: path of the file to process
        :param file_suffix: file type; selects the loader from LOADER_MAP
        :param metadata: extra metadata merged into every chunk (defaults to {})
        :raises KeyError: if ``file_suffix`` has no registered loader
        """
        self.file_path = file_path
        self.file_suffix = file_suffix
        self.metadata = {} if metadata is None else metadata
        self.loader = Pipline.LOADER_MAP[file_suffix](file_path)
        # Separators are ordered strongest-to-weakest break point and include
        # CJK sentence punctuation so Chinese text splits on sentence bounds.
        self.spliter = RecursiveCharacterTextSplitter(
            chunk_size=settings.DOCUMENT_CHUNK_SIZE,
            chunk_overlap=settings.DOCUMENT_CHUNK_OVERLAP,
            separators=["\n\n", "\n", "。", "！", "？", "；", "，", " ", ""]
        )
        self.docs = []
        self.chunks = []

    def load(self):
        """Load the file's documents into ``self.docs``; return ``self`` for chaining.

        BUGFIX: ``lazy_load()`` returns a one-shot generator. The original code
        stored the generator itself, so after ``split()`` consumed it, the docs
        handed back by ``get()`` were an exhausted iterator that yielded nothing.
        Materializing into a list makes the documents safely reusable.
        """
        self.docs = list(self.loader.lazy_load())
        logger.info("懒加载文件%s成功", self.file_path)
        return self

    def split(self):
        """Split ``self.docs`` into chunks and merge ``self.metadata`` into each.

        Falsy metadata values are stripped so empty fields never propagate
        downstream. Returns ``self`` for chaining.
        """
        chunks = self.spliter.split_documents(self.docs)
        logger.info("分词成功，共生成%d个chunk", len(chunks))
        for chunk in chunks:
            chunk.metadata.update(self.metadata)
            # Drop keys whose values are empty/falsy.
            chunk.metadata = {k: v for k, v in chunk.metadata.items() if v}
        self.chunks = chunks
        return self

    def get(self):
        """Return ``(docs, chunks)`` and reset internal state so the
        pipeline instance can be reused for another load/split cycle."""
        docs, chunks = self.docs, self.chunks
        self.docs, self.chunks = [], []
        logger.info("处理文件%s成功", self.file_path)
        return docs, chunks
