# Purpose: load local Markdown files, split the text by headers, embed the chunks, and store the vectors in Milvus.
from config import envConfig, config
import os
from langchain_text_splitters import MarkdownHeaderTextSplitter
from langchain_ollama import OllamaEmbeddings
from pymilvus import connections, FieldSchema, CollectionSchema, DataType, Collection, utility
import logging
from pathlib import Path

# Module-level logger; named 'vectorize' rather than __name__ by project convention.
logger = logging.getLogger('vectorize')

# Chunking parameters.
# NOTE(review): neither constant is referenced anywhere in this module — the
# splitter below works per header/line, not by character count. Presumably
# intended for a future character-based splitting pass; confirm or remove.
MAX_CHUNK_SIZE = 400
CHUNK_OVERLAP = 100

def get_all_md(target_dir=r"D:\PyCharmMiscProject\pythonProjectYA\中间结果\中间结果2"):
    """Return the paths of all ``.md`` files directly inside *target_dir*.

    Args:
        target_dir: Directory to scan (str or Path). Defaults to the
            previously hard-coded intermediate-results directory, so
            existing callers are unaffected.

    Returns:
        Sorted list of file paths as strings. Sorting makes the ordering
        deterministic — downstream code derives Milvus primary-key ids from
        list position, and ``glob`` order is otherwise filesystem-dependent.

    Raises:
        NotADirectoryError: If *target_dir* is not an existing directory.
    """
    target_dir = Path(target_dir)
    if not target_dir.is_dir():
        raise NotADirectoryError(f"路径不是目录: {target_dir}")
    # Non-recursive on purpose: only top-level *.md files are processed.
    return sorted(str(p) for p in target_dir.glob("*.md"))

def split_by_md_header(mdDocs):
    """Split a Markdown document into per-line chunks keyed by header context.

    Each returned element is a langchain Document whose metadata records the
    enclosing headers, e.g.:
    Metadata (("Header_1", "xxx"), ("Header_2", "xxxx")) page_content="xxx..."
    """
    header_levels = [
        ("#", "Header_1"),
        ("##", "Header_2"),
        ("###", "Header_3"),
    ]
    splitter = MarkdownHeaderTextSplitter(
        headers_to_split_on=header_levels,
        return_each_line=True,  # emit one chunk per line, not per section
    )
    return splitter.split_text(mdDocs)

def get_vector_store(collectionName: str = 'LangChainCollection', is_local=True):
    """Connect to Milvus and return a freshly created, loaded collection.

    WARNING: destructive — if a collection with *collectionName* already
    exists it is DROPPED and recreated, discarding all stored vectors.

    Args:
        collectionName: Name of the Milvus collection to (re)create.
        is_local: True → use the local Milvus instance (insecure connection);
            False → use the backup instance with TLS enabled.

    Returns:
        A loaded ``pymilvus.Collection`` ready for inserts and searches.

    Raises:
        Exception: Re-raises whatever ``connections.connect`` raises on
            connection failure, after logging it.
    """
    # Connection parameters: local deployment vs. remote backup instance.
    milvusConn = {
        "uri": envConfig.MILVUS_LOCAL_URI if is_local else envConfig.BACKUP_MILVUS_URI,
        "user": envConfig.MILVUS_USER if is_local else envConfig.BACKUP_MILVUS_USERNAME,
        "password": envConfig.MILVUS_PASSWORD if is_local else envConfig.BACKUP_MILVUS_PASSWORD,
        "secure": not is_local  # TLS only for the remote backup instance
    }

    # Establish the connection (lazy %-args so formatting is skipped when
    # the log level filters the record out).
    try:
        connections.connect(**milvusConn)
        logger.info("成功连接 Milvus: %s", milvusConn["uri"])
    except Exception as e:
        logger.error("Milvus 连接失败: %s", e)
        raise

    # Collection schema. Callers supply ids explicitly (auto_id=False) and
    # the vector dim must match the embedding model's output (768 for
    # nomic-embed-text used by vectorize_all).
    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=False),
        FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=4096),
        FieldSchema(name="header_1", dtype=DataType.VARCHAR, max_length=256, default_value='EMPTY'),
        FieldSchema(name="header_2", dtype=DataType.VARCHAR, max_length=256, default_value='EMPTY'),
        FieldSchema(name="header_3", dtype=DataType.VARCHAR, max_length=256, default_value='EMPTY'),
        FieldSchema(name="metadata", dtype=DataType.JSON),
        FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=768)
    ]

    schema = CollectionSchema(fields, description="LangChain Collection")

    # Drop any pre-existing collection so every run starts from a clean slate.
    if utility.has_collection(collectionName):
        logger.warning("集合 %s 已存在，正在删除...", collectionName)
        utility.drop_collection(collectionName)

    # Create the new collection.
    collection = Collection(name=collectionName, schema=schema)

    # Build an ANN index on the vector field and load the collection into
    # memory so it is immediately searchable.
    index_params = {
        "index_type": "IVF_FLAT",
        "metric_type": "L2",
        "params": {"nlist": 128}
    }
    collection.create_index(field_name="vector", index_params=index_params)
    collection.load()

    return collection

def vectorize_all():
    """Vectorize every Markdown file and insert the chunks into Milvus.

    Pipeline: recreate the target collection → read each ``.md`` file →
    split by headers → batch-embed all chunks → insert and flush.
    """
    print("Starting to vectorize all markdown files")

    # Initialize the Milvus collection (dropped and recreated on every run).
    collection_name = config.APP_NAME.replace(' ', '_')
    vector_store = get_vector_store(collectionName=collection_name, is_local=True)

    # Initialize the embedding model (768-dim output, matching the schema).
    embeddings = OllamaEmbeddings(
        model="nomic-embed-text",
        base_url=envConfig.OLLAMA_URL
    )

    # Read and split every Markdown file.
    all_splits = []
    for file in get_all_md():
        text = Path(file).read_text(encoding='utf-8')
        all_splits.extend(split_by_md_header(text))

    # Nothing to embed — avoid an empty insert call against Milvus.
    if not all_splits:
        logger.warning("未找到可向量化的文本块，跳过插入")
        return

    # Batch-embed all chunks in a single request instead of one round-trip
    # per chunk (embed_query in a loop).
    vectors = embeddings.embed_documents([s.page_content for s in all_splits])

    # Prepare rows for insertion; ids are positional, so ordering must be
    # deterministic (get_all_md returns a stable ordering of files).
    documents = [{
        "id": idx,
        "text": split.page_content,
        "header_1": split.metadata.get("Header_1", ""),
        "header_2": split.metadata.get("Header_2", ""),
        "header_3": split.metadata.get("Header_3", ""),
        "metadata": split.metadata,
        "vector": vector
    } for idx, (split, vector) in enumerate(zip(all_splits, vectors))]

    # Insert and flush so the data is sealed and persisted, not left in a
    # growing in-memory segment.
    vector_store.insert(documents)
    vector_store.flush()
    print(f"数据已存入集合: {collection_name}")

# Script entry point: run the full vectorization pipeline when executed directly.
if __name__ == "__main__":
    vectorize_all()
    print("Vectorization completed!")