## 功能：把一个指定文件夹下的所有文件（pdf或者word）向量化后存入milvus，并展示数据库中所有的集合

# 运行后输入包含PDF/Word文档的文件夹路径
# 自动处理所有支持的文档格式
# 为每个文档创建独立的Milvus集合
# 处理完成后显示所有集合列表

import os
from config import envConfig
from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_ollama import OllamaEmbeddings
from pymilvus import (
    connections, utility, Collection,
    FieldSchema, CollectionSchema, DataType
)
import logging
import re

# Logging configuration for the whole script.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('vectorize_docs')

# Model name served by Ollama, used below to generate embeddings.
# NOTE(review): "deepseek-r1:1.5b" is a chat/reasoning model; confirm it is
# really intended as the embedding model (a dedicated embedding model such
# as "nomic-embed-text" is more typical for vector stores).
EMBEDDING_MODEL = "deepseek-r1:1.5b"

def get_config_value(key, default=None):
    """Return configuration attribute *key* from envConfig, or *default*.

    ``getattr`` with a third argument already returns the default instead
    of raising ``AttributeError``, so the previous try/except around this
    call was unreachable dead code and has been removed.
    """
    return getattr(envConfig, key, default)

def sanitize_collection_name(filename):
    """Derive a valid Milvus collection name from a file name.

    Milvus collection names may contain only letters, digits and
    underscores, and must not start with a digit. Steps:
      1. strip the file extension,
      2. replace every illegal character with '_',
      3. prefix 'doc_' when the result is empty or does not start with a
         letter (also fixes an IndexError on an empty stem),
      4. truncate to 64 characters.
    """
    # Drop the file extension.
    name = os.path.splitext(filename)[0]
    # Replace illegal characters.
    name = re.sub(r'[^a-zA-Z0-9_]', '_', name)
    # Guard against an empty stem before indexing name[0]; the original
    # crashed with IndexError on an empty filename.
    if not name or not name[0].isalpha():
        name = "doc_" + name
    # Enforce the length limit.
    return name[:64]

def create_collection(collection_name, dim=1536):
    """Return the Milvus collection *collection_name*, creating it if absent.

    A newly created collection gets an auto-id INT64 primary key, a float
    vector field of dimension *dim*, a VARCHAR text field and a JSON
    metadata field, plus an IVF_FLAT / L2 index on the vector field.
    """
    # Reuse an existing collection rather than recreating it.
    if utility.has_collection(collection_name):
        logger.info(f"集合 {collection_name} 已存在，跳过创建")
        return Collection(collection_name)

    schema = CollectionSchema(
        [
            FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=dim),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
            FieldSchema(name="metadata", dtype=DataType.JSON),
        ],
        description="文档向量库",
    )
    collection = Collection(name=collection_name, schema=schema, using="default")

    # IVF_FLAT with L2 distance; nlist=128 index partitions.
    collection.create_index(
        "vector",
        {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 128}},
    )
    logger.info(f"集合 {collection_name} 创建成功，维度: {dim}")
    return collection

def load_and_split_document(file_path):
    """Load a PDF or Word document and split it into overlapping chunks.

    Returns a list of langchain document chunks, each tagged with a
    'source' (base file name) and a 'page' metadata entry. Returns an
    empty list for unsupported formats or load failures.
    """
    logger.info(f"开始加载文档: {file_path}")

    # Pick a loader from the (lower-cased) file extension.
    lowered = file_path.lower()
    if lowered.endswith('.pdf'):
        loader = PyPDFLoader(file_path)
    elif lowered.endswith(('.doc', '.docx')):
        loader = Docx2txtLoader(file_path)
    else:
        logger.error(f"不支持的文档格式: {file_path}")
        return []

    try:
        documents = loader.load()
    except Exception as e:
        logger.error(f"文档加载失败: {str(e)}")
        return []

    # 800-char chunks with 100-char overlap, measured with plain len().
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=800,
        chunk_overlap=100,
        length_function=len,
        is_separator_regex=False,
    )
    chunks = splitter.split_documents(documents)
    logger.info(f"文档分割完成，共 {len(chunks)} 个文本片段")

    # Tag every chunk with its origin; default the page number when the
    # loader did not supply one (presumably the Word loader — confirm).
    source_name = os.path.basename(file_path)
    for chunk in chunks:
        chunk.metadata.setdefault('page', 0)
        chunk.metadata['source'] = source_name

    return chunks

def _connect_milvus():
    """Open the default Milvus connection using envConfig settings.

    Returns True on success, False (after logging the error) on failure.
    """
    host = get_config_value('MILVUS_HOST', 'localhost')
    port = get_config_value('MILVUS_PORT', '19530')
    try:
        connections.connect(
            alias="default",
            host=host,
            port=port,
            user=get_config_value('MILVUS_USER', ''),
            password=get_config_value('MILVUS_PASSWORD', ''),
            secure=False
        )
        logger.info(f"成功连接Milvus: {host}:{port}")
        return True
    except Exception as e:
        logger.error(f"Milvus连接失败: {str(e)}")
        return False

def vectorize_document(file_path, collection_name):
    """Embed *file_path* with Ollama and store the chunks in Milvus.

    Steps: connect to Milvus, probe the embedding model for its vector
    dimension, create/reuse the target collection, load and split the
    document, embed every chunk and insert (vectors, texts, metadata)
    column-wise. Returns True on success, False on any failure (which is
    already logged).
    """
    # 1. Connect to Milvus.
    if not _connect_milvus():
        return False

    # 2. Initialize the embedding model.
    embeddings = OllamaEmbeddings(
        model=EMBEDDING_MODEL,
        base_url=get_config_value('OLLAMA_URL', 'http://localhost:11434')
    )

    # 3. Probe once to discover the embedding dimension (model dependent).
    try:
        dim = len(embeddings.embed_query("测试文本"))
        logger.info(f"嵌入模型维度: {dim}")
    except Exception as e:
        logger.error(f"嵌入模型测试失败: {e}")
        return False

    # 4. Create or fetch the collection with the probed dimension.
    collection = create_collection(collection_name, dim)

    # 5. Load and split the document; empty means unsupported or failed.
    chunks = load_and_split_document(file_path)
    if not chunks:
        return False

    # 6. Prepare the column data for insertion.
    texts = [chunk.page_content for chunk in chunks]
    metadatas = [chunk.metadata for chunk in chunks]

    # 7. Generate embeddings for all chunks.
    # Fixed: this log call was an f-string with no placeholder.
    logger.info("开始生成向量嵌入...")
    try:
        vectors = embeddings.embed_documents(texts)
    except Exception as e:
        logger.error(f"向量生成失败: {e}")
        return False

    # 8. Column-wise insert; column order must match the schema
    #    (the auto-id primary key is omitted).
    try:
        collection.insert([vectors, texts, metadatas])
        logger.info(f"成功插入 {len(chunks)} 个文档片段到集合 {collection_name}")
    except Exception as e:
        logger.error(f"数据插入失败: {e}")
        return False

    # 9. Flush so the newly inserted rows become searchable.
    collection.flush()
    return True

def list_all_collections():
    """Print and return the names of every collection in Milvus.

    Connects with the configured credentials, prints a numbered list to
    stdout and returns the collection names; returns [] on any error.
    """
    try:
        connections.connect(
            alias="default",
            host=get_config_value('MILVUS_HOST', 'localhost'),
            port=get_config_value('MILVUS_PORT', '19530'),
            user=get_config_value('MILVUS_USER', ''),
            password=get_config_value('MILVUS_PASSWORD', ''),
            secure=False
        )
        names = utility.list_collections()
        print("\nMilvus 中的所有集合:")
        for index, name in enumerate(names, 1):
            print(f"{index}. {name}")
        return names
    except Exception as e:
        print(f"获取集合列表失败: {str(e)}")
        return []

def main():
    """Vectorize every supported document in a user-supplied folder.

    Prompts for a directory, processes each .pdf/.doc/.docx file into its
    own Milvus collection, reports per-file success/failure, then lists
    all collections in the database.
    """
    docs_dir = input("请输入文档文件夹路径: ").strip()

    if not os.path.isdir(docs_dir):
        print(f"错误: 文件夹 '{docs_dir}' 不存在")
        return

    supported_extensions = ('.pdf', '.doc', '.docx')
    processed_files = 0

    for filename in os.listdir(docs_dir):
        file_path = os.path.join(docs_dir, filename)
        if not os.path.isfile(file_path):
            continue
        if not filename.lower().endswith(supported_extensions):
            continue

        collection_name = sanitize_collection_name(filename)
        # Fixed: these f-strings had lost their {filename} placeholder and
        # printed a literal placeholder text instead of the file name.
        print(f"\n处理文件: {filename}")
        print(f"集合名称: {collection_name}")

        if vectorize_document(file_path, collection_name):
            processed_files += 1
            print(f"✓ 完成: {filename}")
        else:
            print(f"✗ 失败: {filename}")

    print(f"\n处理完成! 共处理 {processed_files} 个文档")

    # Show everything now stored in the database.
    list_all_collections()

if __name__ == "__main__":
    main()