import logging
import os
from pathlib import Path

from langchain.retrievers import EnsembleRetriever
from langchain_milvus import Milvus
from langchain_ollama import OllamaEmbeddings
from langchain_text_splitters import MarkdownHeaderTextSplitter
from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections, utility

from config import envConfig, config

# Module-level logger for ingestion/search diagnostics.
logger = logging.getLogger('vectorize')



# Chunking parameters — not referenced anywhere in this file; presumably
# intended for a size-based splitter. TODO confirm with callers before removing.
MAX_CHUNK_SIZE = 400
CHUNK_OVERLAP = 100
# Shared embedding client; note vectorize_all() builds its own identical instance.
embedding = OllamaEmbeddings(
    model="nomic-embed-text", # embedding model configuration
    base_url=envConfig.OLLAMA_URL
)

def get_all_md(target_dir=r"D:\PyCharmMiscProject\pythonProjectYA\中间结果\中间结果2"):
    """
    Collect full paths of all .md files directly inside *target_dir* (non-recursive).

    :param target_dir: directory to scan; defaults to the project's
        intermediate-results folder so existing callers are unaffected.
    :return: list of full paths to the ``.md`` files found.
    :raises FileNotFoundError: if *target_dir* does not exist.
    :raises NotADirectoryError: if *target_dir* exists but is not a directory.
    """
    # Validate the path before listing so callers get a precise error.
    if not os.path.exists(target_dir):
        raise FileNotFoundError(f"目录不存在: {target_dir}")
    if not os.path.isdir(target_dir):
        raise NotADirectoryError(f"路径不是目录: {target_dir}")

    # Comprehension instead of the manual append loop; only top-level files
    # are considered (no recursion), matching the original behavior.
    return [
        os.path.join(target_dir, filename)
        for filename in os.listdir(target_dir)
        if filename.endswith(".md")
    ]


def get_all_md_perfect(target_dir=r"D:\PyCharmMiscProject\pythonProjectYA\中间结果\中间结果2"):
    """
    pathlib-based variant of get_all_md(): list all .md files in *target_dir*.

    :param target_dir: directory to scan (str or Path); defaults to the
        project's intermediate-results folder for backward compatibility.
    :return: list of full paths (as str) to the ``.md`` files found.
    :raises NotADirectoryError: if *target_dir* is missing or not a directory
        (is_dir() is False in both cases, matching the original behavior).
    """
    directory = Path(target_dir)
    if not directory.is_dir():
        raise NotADirectoryError(f"路径不是目录: {directory}")
    # Non-recursive glob; str() keeps the return type compatible with get_all_md().
    return [str(p) for p in directory.glob("*.md")]


def split_by_md_header(mdDocs):
    """
    Split markdown text on #/##/### headers, keeping each header value in the
    chunk metadata, e.g. ``{"Header_1": "...", "Header_2": "..."}``.

    :param mdDocs: raw markdown text of one document.
    :return: list of header-annotated document chunks (one per line, since
        ``return_each_line=True``).
    """
    headers_to_split_on = [("#", "Header_1"), ("##", "Header_2"), ("###", "Header_3")]
    md_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on, return_each_line=True)
    md_header_splits = md_splitter.split_text(mdDocs)
    # FIX: the original bound the index to `_` (the throwaway name) and the
    # document to `i`, then used both — inverted naming. It also declared a
    # dead `final_splits` list that was never filled. Debug dump kept as-is.
    for index, doc in enumerate(md_header_splits):
        print(f"Index: {index}, Metadata: {doc.metadata} Content: {doc.page_content}")
        print("---------------------")
    return md_header_splits


def get_vector_store(collectionName: str = 'LangChainCollection', is_local=True) -> Collection:
    """
    Connect to Milvus and return a loaded Collection with the ingestion schema.

    Builds the connection config (local Docker vs. cloud backup), connects,
    declares the fixed field schema, creates an IVF_FLAT/L2 index on "vector",
    and loads the collection into memory.

    :param collectionName: name of the collection to create/open.
    :param is_local: True -> local Docker instance; False -> cloud backup (HTTPS).
    :return: a loaded pymilvus Collection ready for insert/search.
    :raises Exception: re-raises whatever connections.connect() fails with.
    """
    if is_local:
        # Milvus instance running in local Docker.
        milvusConn = {
            "uri": envConfig.MILVUS_LOCAL_URI,
            "user": envConfig.MILVUS_USER,  # only needed when auth is enabled
            "password": envConfig.MILVUS_PASSWORD
        }
    else:
        # Cloud backup instance.
        milvusConn = {
            "uri": envConfig.BACKUP_MILVUS_URI,
            "user": envConfig.BACKUP_MILVUS_USERNAME,
            "password": envConfig.BACKUP_MILVUS_PASSWORD,
            "secure": True  # enable HTTPS
        }

    logger.info(f"Connecting to Milvus: {milvusConn['uri']}")

    # Establish the connection; log and re-raise on failure so callers can abort.
    try:
        connections.connect(**milvusConn)
        logger.info("Milvus 连接成功")
    except Exception as e:
        logger.error(f"Milvus 连接失败: {str(e)}")
        raise

    # Field schema for ingested chunks. auto_id=False: ids are assigned by the
    # caller (see vectorize_all). Header fields default to 'EMPTY' when absent.
    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=False),
        FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=4096),
        FieldSchema(name="header_1", dtype=DataType.VARCHAR, max_length=256, default_value='EMPTY'),
        FieldSchema(name="header_2", dtype=DataType.VARCHAR, max_length=256, default_value='EMPTY'),
        FieldSchema(name="header_3", dtype=DataType.VARCHAR, max_length=256, default_value='EMPTY'),
        FieldSchema(name="metadata", dtype=DataType.JSON),
        # dim=768 — presumably matches nomic-embed-text's output size; TODO confirm.
        FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=768)
    ]

    # Create/open the collection.
    # NOTE(review): if a collection with this name already exists with a
    # different schema, pymilvus raises here — confirm cleanup happens upstream.
    schema = CollectionSchema(fields, description="LangChain Collection")
    collection = Collection(name=collectionName, schema=schema)
    index_params = {
        "index_type": "IVF_FLAT",
        "metric_type": "L2",
        "params": {"nlist": 128}
    }
    collection.create_index(field_name="vector", index_params=index_params)

    # Load the collection into memory so it is immediately searchable.
    collection.load()

    # Return the ready-to-use collection object.
    return collection


def vectorize_all(drop_old=True):
    """
    Read all markdown files, split them by headers, embed every chunk with
    Ollama, and insert the rows into the local Milvus collection.

    :param drop_old: when True, drop an existing collection of the same name
        first so repeated runs don't collide on manually-assigned ids.
    """
    print("Starting to vectorize all markdown files")

    collection_name = config.APP_NAME.replace(' ', '_')

    # BUG FIX: drop_old was previously ignored — its cleanup code had drifted
    # into test_vectorize(), where `drop_old` is undefined (NameError).
    if drop_old:
        # utility.* needs an open connection. Connecting here with the same
        # parameters get_vector_store() uses is safe: pymilvus reuses the
        # default alias when the config is identical.
        connections.connect(
            uri=envConfig.MILVUS_LOCAL_URI,
            user=envConfig.MILVUS_USER,
            password=envConfig.MILVUS_PASSWORD,
        )
        if utility.has_collection(collection_name):
            utility.drop_collection(collection_name)
            print(f"已删除旧集合: {collection_name}")

    embeddings = OllamaEmbeddings(
        model="nomic-embed-text",
        base_url=envConfig.OLLAMA_URL
    )
    print(f"Embedding Model: {embeddings.model}")

    # Gather every header-split chunk from every markdown file.
    all_splits = []
    for file in get_all_md():
        with open(file, 'r', encoding='utf-8') as f:
            all_splits.extend(split_by_md_header(f.read()))

    # One insert-ready row per chunk. Sequential ids satisfy the auto_id=False
    # primary key in the collection schema. (`doc_id` avoids shadowing builtin `id`.)
    documents = []
    for doc_id, split in enumerate(all_splits):
        metadata = split.metadata
        documents.append({
            "id": doc_id,
            "text": split.page_content,
            "header_1": metadata.get("Header_1", ""),
            "header_2": metadata.get("Header_2", ""),
            "header_3": metadata.get("Header_3", ""),
            "metadata": metadata,
            "vector": embeddings.embed_query(split.page_content),
        })

    vector_store = get_vector_store(
        collectionName=collection_name,
        is_local=True  # use the local Docker instance
    )

    # Bulk insert all rows at once.
    vector_store.insert(documents)
    print(f"Vectorized Done to collection: {collection_name}")


def test_vectorize():
    """
    Interactive smoke test: repeatedly read a query from stdin and print the
    top matches from the local Docker Milvus instance. Type "exit" to quit.
    """
    # Connection settings for the local Docker instance.
    milvusConn = {
        "uri": envConfig.MILVUS_LOCAL_URI,
        "user": envConfig.MILVUS_USER,
        "password": envConfig.MILVUS_PASSWORD
    }

    # BUG FIX: a stray `if drop_old:` cleanup block lived here and referenced
    # an undefined name, guaranteeing a NameError on every call. Collection
    # cleanup belongs to the ingestion path (vectorize_all), so it was removed
    # from this read-only test.

    embeddings = OllamaEmbeddings(
        model="nomic-embed-text",
        base_url=envConfig.OLLAMA_URL
    )
    retriever = Milvus(
        collection_name=config.APP_NAME.replace(' ', '_'),
        connection_args=milvusConn,
        embedding_function=embeddings
    ).as_retriever(search_kwargs={"k": 6})
    ensemble_retriever = EnsembleRetriever(retrievers=[retriever])

    while True:
        query = input("请输入查询：")
        if query == "exit":
            break
        # invoke() replaces the deprecated get_relevant_documents() API.
        result = ensemble_retriever.invoke(query)
        for i, ele in enumerate(result):
            print(f"{i} - Found Doc: {ele.metadata}, {ele.page_content}")


# Guard the pipeline behind __main__ so importing this module (e.g. to reuse
# get_all_md or get_vector_store) does not trigger a full re-vectorization run.
if __name__ == "__main__":
    vectorize_all()
    test_vectorize()
    print("FINISH!")

