import os
from parser.pdf import extract_pdf_text
from langchain.text_splitter import RecursiveCharacterTextSplitter
from llm.local import ollama_qa

# Import the parser function for each supported file format
from parser.pdf import extract_pdf_text
from parser.word import extract_text_from_word
from parser.excel import extract_text_from_excel
from parser.ppt import extract_ppt_text
from parser.htm import extract_text_from_html
from parser.xml import extract_xml_text
from parser.csvs import read_csv_to_text
from vectorstore import save_text_to_db

# Separators are tried in order: paragraph break, newline, Chinese/English
# sentence-ending period, then Chinese/English comma.
_SEPARATORS = [
    "\n\n",
    "\n",
    "。",
    ".",
    "，",
    ",",
]

# Module-level splitter shared by the ingestion helpers: chunks of at most
# 50 characters with a 10-character overlap between neighbouring chunks.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=50,
    chunk_overlap=10,
    separators=_SEPARATORS,
)

# Logging is used for progress reporting throughout this script.
import logging

# Configure root-logger level and message format once, at import time.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)


def extract_text(file_path):
    """Extract plain text from a document, dispatching on its file extension.

    Supported formats: PDF, Word (.docx/.doc), Excel (.xlsx/.xls),
    PowerPoint (.pptx/.ppt), HTML (.html/.htm), XML, CSV, and plain
    text-like files (.md/.txt/.jsonl, read as UTF-8).

    :param file_path: path to the document to parse
    :return: the extracted text as a single string
    :raises ValueError: if the file extension is not supported
    """
    ext = os.path.splitext(file_path)[-1].lower()
    # Dispatch on extension; each branch delegates to the matching parser.
    # Logging uses lazy %-style arguments so formatting only happens when
    # the record is actually emitted.
    if ext == ".pdf":
        logging.info("检测到PDF文件，开始提取文本: %s", file_path)
        return extract_pdf_text(file_path)
    elif ext in (".docx", ".doc"):
        logging.info("检测到Word文件，开始提取文本: %s", file_path)
        return extract_text_from_word(file_path)
    elif ext in (".xlsx", ".xls"):
        logging.info("检测到Excel文件，开始提取文本: %s", file_path)
        return extract_text_from_excel(file_path)
    elif ext in (".pptx", ".ppt"):
        logging.info("检测到PPT文件，开始提取文本: %s", file_path)
        return extract_ppt_text(file_path)
    elif ext in (".html", ".htm"):
        logging.info("检测到HTML文件，开始提取文本: %s", file_path)
        return extract_text_from_html(file_path)
    elif ext == ".xml":
        logging.info("检测到XML文件，开始提取文本: %s", file_path)
        return extract_xml_text(file_path)
    elif ext == ".csv":
        logging.info("检测到CSV文件，开始提取文本: %s", file_path)
        return read_csv_to_text(file_path)
    elif ext in (".md", ".txt", ".jsonl"):
        logging.info("检测到文本/Markdown/JSONL文件，开始读取: %s", file_path)
        with open(file_path, "r", encoding="utf-8") as f:
            return f.read()
    else:
        # Unsupported extension: log and fail loudly rather than return None.
        logging.error("不支持的文件类型: %s", ext)
        raise ValueError("不支持的文件类型: " + ext)


def doc_to_vectorstore(file_path):
    """Split one document into small chunks and persist each chunk
    to the vector store.

    :param file_path: path of the document to ingest
    """
    # 1. Extract the raw text from the uploaded file.
    text = extract_text(file_path)
    # Use the configured logger instead of bare print() debug dumps,
    # consistent with the rest of this module.
    logging.info("提取上传的文件的内容:%s", text)
    # 2. Split with the module-level splitter (chunk_size=50, overlap=10).
    chunks = splitter.split_text(text)
    logging.info("文本分块完成:%s", chunks)
    # 3. Persist every chunk; the enumerate index was unused, so iterate directly.
    for chunk in chunks:
        save_text_to_db(chunk)


def create_hierarchical_index(chunk):
    """Build a hierarchical (parent/child) index entry for one coarse chunk.

    The chunk is re-split into finer sub-chunks; each sub-chunk is stored
    with metadata pointing back at the original chunk so retrieval can
    surface the parent context.

    :param chunk: a coarse text chunk produced by the caller's splitter
    """
    small_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
    # The enumerate index was unused — iterate the sub-chunks directly.
    for small_chunk in small_splitter.split_text(chunk):
        save_text_to_db(
            small_chunk,
            collection_name="rag",
            metadata={
                "original_chunk": chunk,  # parent chunk this sub-chunk came from
                "index_type": "hierarchical",  # index type marker
            },
        )


def create_summary_index(chunk):
    """Ask the local LLM for a short summary of *chunk* and store the
    summary in the "rag" collection, with metadata linking it back to
    the original chunk.

    :param chunk: the source text chunk to summarise
    """
    prompt = f"""
    请为以下的文档生成一个简洁的摘要(不能超过100字)
    {chunk}
    """
    # Persist the generated summary alongside a pointer to its source chunk.
    save_text_to_db(
        ollama_qa(prompt),
        collection_name="rag",
        metadata={
            "original_chunk": chunk,  # the source chunk being summarised
            "index_type": "summary",  # index type marker
        },
    )


def create_question_index(chunk):
    """Generate up to three hypothetical questions answerable from *chunk*
    and store each question as an index entry.

    Each stored question carries metadata pointing back at the original
    chunk so its answer can be retrieved from the source text.

    :param chunk: the source text chunk the questions must be answerable from
    """
    question_prompt = f"""
    请为以下的文档生成3个假设性的问题，请确保这些问题的答案能在文档中找到,每一个问题用？加换行结束
    {question_prompt_chunk if False else chunk}
    """
    questions_response = ollama_qa(question_prompt)
    # Fix: the previous identity comprehension kept blank lines, so empty
    # strings could be stored and crowd real questions out of the first three.
    questions = [q.strip() for q in questions_response.split("\n") if q.strip()]
    for question in questions[:3]:
        save_text_to_db(
            question,
            collection_name="rag",
            metadata={
                "original_chunk": chunk,  # source chunk containing the answer
                "index_type": "question",  # index type marker
            },
        )


def doc_to_multidimensional_vectorstore(file_path):
    """Ingest a document with several complementary index types.

    Builds the plain fine-grained chunk index, then for each coarse chunk
    a hierarchical index, a summary index, and a question index.

    :param file_path: path of the document to ingest
    """
    # Plain fine-grained index first.
    # NOTE(review): the text is extracted twice (here and inside
    # doc_to_vectorstore); consider passing the text through instead.
    doc_to_vectorstore(file_path)
    # Extract and coarse-split the content for the enriched indexes.
    text = extract_text(file_path)
    # Renamed so the local no longer shadows the module-level `splitter`.
    coarse_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
    for chunk in coarse_splitter.split_text(text):
        # Hierarchical (parent/child) index
        create_hierarchical_index(chunk)
        # Summary index
        create_summary_index(chunk)
        # Question index
        create_question_index(chunk)


if __name__ == "__main__":
    file_path = "data/candidates.csv"
    doc_to_multidimensional_vectorstore(file_path)
