import os
import logging
from langchain_text_splitters import RecursiveCharacterTextSplitter
import extract
from vectorstore import save_text_to_db

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
)

logger = logging.getLogger(__name__)

# Default vector-store collection name
DEFAULT_COLLECTION_NAME = "rag_system_collection"
# Default chunk size (characters) used when splitting documents
DEFAULT_CHUNK_SIZE = 500
# Default overlap (characters) between adjacent chunks
DEFAULT_CHUNK_OVERLAP = 50


def extract_text_auto(file_path: str) -> str:
    """Extract plain text from a document, dispatching on its file extension.

    Supported types: PDF, Word (.docx/.doc), Excel (.xlsx/.xls),
    PowerPoint (.pptx/.ppt), HTML (.html/.htm), XML, CSV, JSON, and
    plain text / Markdown / JSONL.

    :param file_path: path of the document to extract text from
    :return: the extracted text
    :raises FileNotFoundError: if ``file_path`` does not exist
    :raises ValueError: if the file extension is not supported
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"文件不存在:{file_path}")
    ext = os.path.splitext(file_path)[-1].lower()
    # Extension -> (log-message prefix, extractor callable).
    # A dispatch table replaces the former ten-branch if/elif chain; the
    # previous `try/except Exception: raise` wrapper was a no-op and is gone.
    dispatch = {
        ".pdf": ("检测到PDF文件，开始提取文本", extract.extract_pdf_text),
        ".docx": ("检测到Word文件，开始提取文本", extract.extract_text_from_word),
        ".doc": ("检测到Word文件，开始提取文本", extract.extract_text_from_word),
        ".xlsx": ("检测到Excel文件，开始提取文本", extract.extract_text_from_excel),
        ".xls": ("检测到Excel文件，开始提取文本", extract.extract_text_from_excel),
        ".pptx": ("检测到PPT文件，开始提取文本", extract.extract_ppt_text),
        ".ppt": ("检测到PPT文件，开始提取文本", extract.extract_ppt_text),
        ".html": ("检测到HTML文件，开始提取文本", extract.extract_text_from_html),
        ".htm": ("检测到HTML文件，开始提取文本", extract.extract_text_from_html),
        ".xml": ("检测到XML文件，开始提取文本", extract.extract_xml_text),
        ".csv": ("检测到CSV文件，开始提取文本", extract.read_csv_to_text),
        ".json": ("检测到JSON文件，开始提取文本", extract.extract_text_from_json),
        ".md": ("检测到文本/Markdown/JSONL文件，开始读取", extract.read_text_file),
        ".txt": ("检测到文本/Markdown/JSONL文件，开始读取", extract.read_text_file),
        ".jsonl": ("检测到文本/Markdown/JSONL文件，开始读取", extract.read_text_file),
    }
    entry = dispatch.get(ext)
    if entry is None:
        # Unsupported extension: log and raise, matching the original contract.
        logger.error(f"不支持的文件类型: {ext}")
        raise ValueError(f"不支持的文件类型: {ext}")
    prefix, extractor = entry
    logger.info(f"{prefix}: {file_path}")
    return extractor(file_path)


def doc_to_vectorstore(
    file_path: str,
    collection_name: str = DEFAULT_COLLECTION_NAME,
    chunk_size: int = DEFAULT_CHUNK_SIZE,
    chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
) -> int:
    """Split a document into chunks and persist each chunk to the vector store.

    :param file_path: path of the document to ingest
    :param collection_name: target vector-store collection
    :param chunk_size: maximum characters per chunk
    :param chunk_overlap: overlapping characters between adjacent chunks
    :return: number of chunks successfully saved (0 if the document is empty)
    """
    content = extract_text_auto(file_path)
    if not content.strip():
        logger.warning(f"提取的文本内容为空: {file_path}")
        return 0

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size, chunk_overlap=chunk_overlap
    )
    pieces = splitter.split_text(content)
    logger.info(f"文档分块完成，块数量: {len(pieces)}")

    saved = 0
    # Persist chunks one by one; a failure on one chunk must not abort the rest.
    for seq, piece in enumerate(pieces, start=1):
        try:
            logger.info(f"正在保存第 {seq} 块到向量数据库...")
            save_text_to_db(piece, collection_name=collection_name)
            saved += 1
        except Exception as e:
            logger.error(f"保存第 {seq} 块失败: {str(e)}")
    logger.info(
        f"文件{file_path}已经完成向量化并入库，成功保存到向量数据库的块数量: {saved}/{len(pieces)}"
    )
    return saved
