# -*- coding: utf-8 -*-
# time: 2025/4/1 16:39
# file: vector.py
# author: hanson
"""
将文档转 向量 基于

"""
import time

from langchain_chroma import Chroma
from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader, Docx2txtLoader, TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_ollama import OllamaEmbeddings
import pdfplumber
from langchain_text_splitters import RecursiveCharacterTextSplitter

Load_path = r"F:\workspace\py_project\intellect\llm\pdf"

def load_documents(source_dir: str):
    """Recursively load .md/.txt and .pdf files under ``source_dir``.

    Parameters
    ----------
    source_dir : str
        Root directory searched recursively for documents.

    Returns
    -------
    list
        Combined list of loaded LangChain ``Document`` objects.
    """
    text_loader = DirectoryLoader(
        source_dir,
        glob=["**/*.md", "**/*.txt"],
        show_progress=True,           # show a progress bar
        use_multithreading=True,      # load files concurrently
        loader_cls=TextLoader,        # loader instantiated per matched file
        loader_kwargs={"autodetect_encoding": True},
    )
    # BUG FIX: the original passed the *module* ``pdfplumber`` as loader_cls.
    # DirectoryLoader needs a loader class; PyPDFLoader (imported above) is
    # the intended one, so instantiation no longer fails at load time.
    pdf_loader = DirectoryLoader(
        path=source_dir,
        glob="**/*.pdf",
        show_progress=True,
        silent_errors=True,           # skip unreadable PDFs instead of raising
        use_multithreading=True,
        loader_cls=PyPDFLoader,
    )
    # .doc/.docx support kept for reference; enable when needed.
    """
    docx_loader = DirectoryLoader(
        path=source_dir,
        glob=["**/*.doc", "**/*.docx"],
        show_progress=True,
        use_multithreading=True,
        silent_errors=True,
        loader_cls=Docx2txtLoader,
        loader_kwargs={"autodetect_encoding": True},
    )
    """
    # Merge the per-format document lists into one.
    docs = []
    docs.extend(text_loader.load())
    docs.extend(pdf_loader.load())
    # docs.extend(docx_loader.load())
    print(f"成功加载 {len(docs)} 份文档")
    return docs

def split_documents(documents, chunk_size=800, chunk_overlap=150):
    """Split documents into overlapping chunks with RecursiveCharacterTextSplitter.

    Parameters
    ----------
    documents : list
        Documents produced by ``load_documents``.
    chunk_size : int
        Maximum characters per chunk; 500-1000 recommended.        **important**
    chunk_overlap : int
        Overlap between adjacent chunks (keeps context coherent);
        100-200 recommended.                                       **important**

    Returns
    -------
    list
        The split document chunks (possibly empty).
    """
    text_splitter = RecursiveCharacterTextSplitter(
        separators=["\n\n", "\n", ".", "。", "!", "?", "？", "！", "；", ";"],
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        length_function=len,
        add_start_index=True,  # keep each chunk's offset into its source document
    )

    split_docs = text_splitter.split_documents(documents)
    print(f"原始文档数：{len(documents)}")
    print(f"分割后文本块数：{len(split_docs)}")

    # Preview the first chunk. BUG FIX: the original indexed split_docs[0]
    # unconditionally and raised IndexError when nothing was loaded/split.
    if split_docs:
        print("\n示例文本块：")
        print(split_docs[0].page_content[:300] + "...")
        print(f"元数据：{split_docs[0].metadata}")

    return split_docs

# 指定持久化向量数据库的存储路径
VECTOR_DIR = "data/vector_store"

"""
# TODO(review): could the local HuggingFace model below be used for embeddings instead?
embed_model = HuggingFaceEmbedding(
    model_name=r"E:\soft\embedding\Ceceliachenen\paraphrase-multilingual-MiniLM-L12-v2"
)
Settings.embed_model = embed_model
"""

def create_vector_store(split_docs, embeddings, persist_directory=VECTOR_DIR):
    """Embed ``split_docs`` and persist them in a Chroma vector store.

    Parameters
    ----------
    split_docs : list
        Pre-split document chunks (output of ``split_documents``).
    embeddings : object or None
        Embedding model; when ``None`` a local Ollama model is used.
    persist_directory : str
        Directory where the vector database is persisted on disk.

    Returns
    -------
    Chroma or None
        The populated store, or ``None`` if vectorisation failed.
    """
    if embeddings is None:
        # embedding = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        # Fall back to the local Ollama embedding model.
        embeddings = OllamaEmbeddings(model="deepseek-r1:7b")
    try:
        started = time.time()

        # Build the persistent vector store from the chunks.
        store = Chroma.from_documents(
            documents=split_docs,
            embedding=embeddings,
            persist_directory=persist_directory,  # on-disk persistence path
        )

        elapsed = time.time() - started
        print(f"\n向量化完成！耗时 {elapsed:.2f} 秒")
        print(f"数据库存储路径：{persist_directory}")
        # NOTE(review): _collection is a private Chroma attribute — may break
        # across library versions; confirm before upgrading langchain_chroma.
        print(f"总文档块数：{store._collection.count()}")

        return store
    except Exception as e:
        # Best-effort: report the failure and signal it with None.
        print(f"向量化失败：{str(e)}")
        return None


if __name__ == '__main__':
    # Load everything under the fixed source directory.
    loaded_docs = load_documents(Load_path)
    print(len(loaded_docs))
    # Sanity check: preview the first two loaded documents.
    for doc in loaded_docs[:2]:
        print(f"文件路径: {doc.metadata['source']}")
        print(f"内容预览: {doc.page_content[:150]}...\n")
    # Split, then vectorise (embeddings=None selects the default local model).
    chunks = split_documents(loaded_docs)
    vector_db = create_vector_store(chunks, None)