# -*- coding: utf-8 -*-
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

# Sentence-embedding model.
# NOTE(review): "all-MiniLM-L4-v2" does not appear to be a published
# sentence-transformers checkpoint (the canonical small models are
# all-MiniLM-L6-v2 / all-MiniLM-L12-v2) — confirm this id actually loads.
model_name = "sentence-transformers/all-MiniLM-L4-v2"
model = SentenceTransformer(model_name)

# Corpus of sentences to index.
sentences = [
    "Artificial intelligence is the simulation of human intelligence.",
    "Machine learning is a subset of artificial intelligence.",
    "Natural language processing allows machines to interpret text.",
    "Deep learning enables machines to perform complex tasks.",
    "Data science uses statistical methods to extract knowledge.",
    # Assume many more documents here in a real deployment.
]


# Hierarchical index: IVF coarse partitioning combined with PQ compression.
def create_optimized_index(embeddings, n_clusters=5, pq_bits=8):
    """Build and populate a FAISS IVF-PQ index over *embeddings*.

    Args:
        embeddings: 2-D array-like of shape (n_vectors, dimension).
            Converted to a contiguous float32 array, which FAISS requires,
            so any numeric array-like input is accepted.
        n_clusters: number of IVF partitions (``nlist``). FAISS needs at
            least ``n_clusters`` training vectors to train the quantizer.
        pq_bits: NOTE(review) — despite its name, this value is passed as
            the *number of PQ subquantizers* (``M``, which must divide the
            embedding dimension); the bits-per-code argument is the literal
            8 below. Kept as-is so existing callers see identical behavior,
            but the name is misleading — confirm intent before renaming.

    Returns:
        A trained ``faiss.IndexIVFPQ`` with all embeddings added.
    """
    # FAISS only accepts contiguous float32 matrices.
    xb = np.ascontiguousarray(embeddings, dtype="float32")
    dimension = xb.shape[1]
    quantizer = faiss.IndexFlatL2(dimension)  # coarse quantizer for IVF
    # Signature: IndexIVFPQ(quantizer, d, nlist, M, nbits_per_code).
    index = faiss.IndexIVFPQ(quantizer, dimension, n_clusters, pq_bits, 8)
    index.train(xb)  # learns both the IVF centroids and the PQ codebooks
    index.add(xb)
    return index


# Retrieval helper.
def optimized_search(query_text, index, top_k=3):
    """Return up to *top_k* ``(sentence, distance)`` pairs for *query_text*.

    Encodes the query with the module-level Sentence-BERT model and
    searches the given FAISS index. FAISS pads the result with id -1 when
    fewer than ``top_k`` vectors are found (common with an IVF index and a
    small ``nprobe``); those slots are skipped — previously ``-1`` silently
    aliased ``sentences[-1]`` and returned a wrong document.
    """
    query_embedding = model.encode([query_text])
    distances, indices = index.search(query_embedding, top_k)
    return [
        (sentences[idx], dist)
        for idx, dist in zip(indices[0], distances[0])
        if idx != -1  # bug fix: skip FAISS "no result" padding
    ]


def main():
    """Embed the corpus, build the IVF-PQ index, and run one demo query."""
    # Generate embeddings with Sentence-BERT.
    embeddings = model.encode(sentences)

    # Example query against the optimized index.
    query_text = "What is artificial intelligence?"
    optimized_index = create_optimized_index(embeddings)

    # Run the optimized search and report (document, distance) pairs.
    print("优化索引结果:")
    optimized_results = optimized_search(query_text, optimized_index)
    for doc, dist in optimized_results:
        print(f"文档: {doc}, 距离: {dist:.4f}")


if __name__ == '__main__':
    # Script entry point; the trailing `pass` was dead code and is removed.
    main()
