import os
from pymilvus import connections, db, utility, Collection, CollectionSchema, FieldSchema, DataType
from sentence_transformers import SentenceTransformer

def _connect_database(db_name, host, port):
    """Connect to Milvus and switch to *db_name*, creating it if absent.

    Returns True on success, False when the connection/switch fails.
    """
    try:
        connections.connect(alias="default", host=host, port=port)
        if db_name not in db.list_database():
            print(f"数据库 '{db_name}' 不存在，正在创建...")
            db.create_database(db_name)
        db.using_database(db_name)
        print(f"已连接到数据库：{db_name}")
        return True
    except Exception as e:
        print(f"数据库连接失败：{e}")
        return False


def _load_text_files(data_dir):
    """Read every .txt file directly under *data_dir*.

    Returns (ids, texts); each id is the file's index in the directory
    listing. Returns (None, None) when the directory is missing or
    unreadable. Unreadable individual files are skipped (their index is
    simply absent from ids).
    """
    if not os.path.exists(data_dir):
        print(f"数据文件夹 {data_dir} 不存在！")
        return None, None

    try:
        file_list = [f for f in os.listdir(data_dir) if f.endswith('.txt')]
    except PermissionError as e:
        print(f"无法访问文件夹 {data_dir}: {e}")
        return None, None

    ids, texts = [], []
    for idx, filename in enumerate(file_list):
        filepath = os.path.join(data_dir, filename)
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                texts.append(f.read())
                ids.append(idx)
        except Exception as e:
            print(f"读取文件 {filepath} 时发生错误: {e}")
    return ids, texts


def _create_and_populate(collection_name, expected_dim):
    """Create *collection_name*, embed ./data/*.txt, insert, and index it.

    Returns the populated Collection, or None on any failure.
    NOTE(review): ids are positions in an unsorted os.listdir() result, so
    they are only meaningful while the directory listing order is stable —
    consider persisting an id->filename mapping instead.
    """
    print(f"Collection '{collection_name}' 不存在，开始创建并插入数据...")

    data_dir = os.path.join(os.getcwd(), "data")
    ids, texts = _load_text_files(data_dir)
    if ids is None:
        return None
    if not texts:
        print("没有读取到有效文本数据！")
        return None

    # The embedding model is only needed while (re)building the collection,
    # so it is loaded lazily here instead of on every search call.
    embedding_model = SentenceTransformer('bert-base-nli-mean-tokens')
    embeddings = embedding_model.encode(texts, show_progress_bar=True).tolist()

    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=False),
        FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=expected_dim)
    ]
    schema = CollectionSchema(fields, description="Text embedding collection")

    collection = Collection(name=collection_name, schema=schema)
    collection.insert([ids, embeddings])
    collection.flush()
    print("插入完成！")

    index_params = {
        "metric_type": "COSINE",
        "index_type": "IVF_FLAT",   # IVF_FLAT: inverted-file index over raw vectors
        "params": {"nlist": 128}    # nlist: number of index partitions
    }
    collection.create_index(field_name="embedding", index_params=index_params)
    print("索引创建完成！")
    return collection


def search_milvus(query_vector):
    """Search the 'test_total' Milvus collection for the vector nearest to
    *query_vector* (cosine similarity) and return its integer id.

    If the collection does not exist yet, it is built from the .txt files
    in ./data (768-dim bert-base-nli-mean-tokens embeddings).

    Parameters:
        query_vector: sequence of 768 floats.

    Returns:
        The id of the top-1 hit, or None on any failure (connection,
        missing data, dimension mismatch, empty result).
    """
    db_name = "maxwell_test"
    host = "192.168.1.110"
    port = "19530"
    if not _connect_database(db_name, host, port):
        return None

    collection_name = "test_total"
    expected_dim = 768

    if not utility.has_collection(collection_name):
        collection = _create_and_populate(collection_name, expected_dim)
        if collection is None:
            return None
    else:
        print(f"Collection '{collection_name}' 已存在，直接使用。")
        collection = Collection(name=collection_name)

    # The collection must be loaded into memory before searching.
    print(f"正在加载 Collection '{collection_name}'...")
    collection.load()
    print("加载完成。")

    if collection.is_empty:
        print(f"Collection '{collection_name}' 中没有数据，请先插入数据！")
        return None

    # Guard against querying with a vector of the wrong dimensionality,
    # which would otherwise fail inside Milvus with a less clear error.
    query_vector_dim = len(query_vector)
    if expected_dim != query_vector_dim:
        print(f"错误：查询向量的维度 {query_vector_dim} 与存储向量的维度 {expected_dim} 不一致！")
        return None

    search_params = {
        "metric_type": "COSINE",   # must match the metric used at index time
        "params": {"nprobe": 10}
    }

    try:
        results = collection.search(
            data=[query_vector],
            anns_field="embedding",
            param=search_params,
            limit=1   # top-1 only
        )
    except Exception as e:
        print(f"搜索失败：{e}")
        return None

    if len(results) == 0 or len(results[0]) == 0:
        print("搜索未返回结果，请检查向量维度或字段名。")
        return None

    print("Top-1 搜索结果:")
    for hit in results[0]:
        print(f"ID: {hit.id}, Distance: {hit.distance}")
    return results[0][0].id

def main():
    """Embed a fixed query text, search Milvus for the closest document,
    and print the matching file's contents.

    Uses the module-level SentenceTransformer import; the previous local
    re-import (and an unused ``import sys``) were removed.
    """
    # 1. Build the query vector (replace query_text with CLI input if needed).
    query_text = "请输入您的查询文本"
    model = SentenceTransformer('bert-base-nli-mean-tokens')
    # .encode returns a 768-dim vector, matching the stored embeddings.
    query_vector = model.encode([query_text])[0].tolist()

    # 2. Run the search and, on success, show the original document.
    result_id = search_milvus(query_vector)
    if result_id is not None:
        print(f"最相似文档ID: {result_id}")
        # NOTE(review): this lookup assumes os.listdir() returns files in
        # the same order as when the collection was built — verify, or
        # persist an id->filename mapping alongside the embeddings.
        base_dir = os.getcwd()
        data_dir = os.path.join(base_dir, "data")
        file_list = [f for f in os.listdir(data_dir) if f.endswith('.txt')]
        if result_id < len(file_list):
            filename = file_list[result_id]
            with open(os.path.join(data_dir, filename), 'r', encoding='utf-8') as f:
                print("匹配内容：\n", f.read())
        else:
            print("未找到对应的原始文件")

# Run the demo search only when executed as a script, not on import.
if __name__ == "__main__":
    main()
