from urllib.request import urlretrieve
import h5py
import faiss

import numpy as np
import os
import sys
# current_dir = os.path.dirname(os.path.abspath(__file__))
# parent_dir = os.path.dirname(current_dir)
# print(f"current_dir: {current_dir}")
# sys.path.insert(0, parent_dir)
# Directory that holds the pre-downloaded SIFT1M hdf5 file (see `destination` in main).
parent_dir = "/data1/mjb/flage-embedding-learing/3_Indexing"

"""
 Helper function
The following is a helper function for computing recall.
"""
# compute recall from the prediction results and ground truth
def compute_recall(res, truth):
    recall = 0
    for i in range(len(res)):
        intersect = np.intersect1d(res[i], truth[i])
        recall += len(intersect) / len(res[i])
    recall /= len(res)

    return recall

if __name__ == '__main__':
    """
    Dataset
    This script uses SIFT1M, a very popular dataset for ANN evaluation, to
    compare several faiss index types against a brute-force ground truth.

    Run urlretrieve below to download the dataset, or download it manually
    from the ann-benchmarks repo and place it at `destination`.
    """
    data_url = "http://ann-benchmarks.com/sift-128-euclidean.hdf5"
    destination = f"{parent_dir}/sift-128-euclidean.hdf5"
    # The file can be downloaded ahead of time instead of fetching it here.
    # urlretrieve(data_url, destination)

    # Load the corpus (vectors to index) and the query vectors from hdf5.
    with h5py.File(destination, 'r') as f:
        corpus = f['train'][:]
        query = f['test'][:]

    print(corpus.shape, corpus.dtype)
    # Fixed: the original printed corpus.dtype here instead of query.dtype.
    print(query.shape, query.dtype)

    d = corpus[0].shape[0]  # vector dimensionality (128 for SIFT)
    k = 100                 # number of nearest neighbors retrieved per query

    """
    1. Flat Index
    A flat index brute-force searches the neighbors of every query, so it
    guarantees the optimal result with 100% recall. Its output is therefore
    used as the ground truth for every other index below.
    """
    # add:    CPU times: user 69.2 ms, sys: 80.6 ms, total: 150 ms  Wall time: 149 ms
    index = faiss.IndexFlatL2(d)
    index.add(corpus)
    # search: CPU times: user 17min 30s, sys: 1.62 s, total: 17min 31s  Wall time: 2min 1s
    D, I_truth = index.search(query, k)

    def evaluate(name, index, needs_train=True):
        # Train (when the index requires it) and populate `index`, run every
        # query, then print recall measured against the flat-index truth.
        if needs_train:
            index.train(corpus)
        index.add(corpus)
        _, found = index.search(query, k)
        recall = compute_recall(found, I_truth)
        print(name)
        print(f"Recall: {recall}")

    # 2. IVF Index
    # IVFFlat gives a large search-speed improvement with a tiny recall loss.
    nlist = 5   # number of coarse (inverted-list) clusters
    nprobe = 3  # clusters scanned per query
    # build:  CPU times: user 10.6 s, sys: 831 ms, total: 11.4 s  Wall time: 419 ms
    # search: CPU times: user 9min 15s, sys: 598 ms, total: 9min 16s  Wall time: 12.5 s
    # observed Recall: 0.9999189999999997
    quantizer = faiss.IndexFlatL2(d)
    ivf_index = faiss.IndexIVFFlat(quantizer, d, nlist)
    ivf_index.nprobe = nprobe
    evaluate("IVF Index", ivf_index)

    # 3. HNSW Index
    """
    With a search time under one second, HNSW is one of the best choices for
    extreme speed in the search phase, and the reduction of recall is
    acceptable. The long index-construction time and the large memory
    footprint, however, need to be taken into account.
    """
    M = 64                # graph connectivity (links per node)
    ef_search = 32        # search-time candidate-list size
    ef_construction = 64  # build-time candidate-list size
    # build:  CPU times: user 11min 21s, sys: 595 ms, total: 11min 22s  Wall time: 17 s
    # search: CPU times: user 5.14 s, sys: 3.94 ms, total: 5.14 s  Wall time: 110 ms
    # observed Recall: 0.8963409999999716
    hnsw_index = faiss.IndexHNSWFlat(d, M)
    # Both parameters must be set before adding data.
    hnsw_index.hnsw.efConstruction = ef_construction
    hnsw_index.hnsw.efSearch = ef_search
    evaluate("HNSW Index", hnsw_index, needs_train=False)

    # 4. LSH
    """
    LSH is not a good choice when the data dimension is large; 128 is already
    a burden for it. Even with a relatively small nbits of d * 8, index
    creation and search are still pretty long, and a recall of about 58.6%
    is not satisfactory.
    """
    nbits = d * 8
    # build:  CPU times: user 13.7 s, sys: 660 ms, total: 14.4 s  Wall time: 12.1 s
    # search: CPU times: user 3min 20s, sys: 84.2 ms, total: 3min 20s  Wall time: 5.64 s
    # observed Recall: 0.5856720000000037
    evaluate("LSH", faiss.IndexLSH(d, nbits))

    # 5. Scalar Quantizer Index
    """
    The scalar quantizer's performance looks very similar to the flat index
    because the elements of SIFT vectors are integers in the range [0, 218],
    so the index does not lose much information during scalar quantization.
    For float32 datasets with a more complex distribution the difference
    would be more obvious.
    """
    qtype = faiss.ScalarQuantizer.QT_8bit
    metric = faiss.METRIC_L2
    # build:  CPU times: user 550 ms, sys: 18 ms, total: 568 ms  Wall time: 87.4 ms
    # search: CPU times: user 7min 36s, sys: 169 ms, total: 7min 36s  Wall time: 12.7 s
    # observed Recall: 0.990444999999872
    evaluate("Scalar Quantizer Index", faiss.IndexScalarQuantizer(d, qtype, metric))

    # 6. Product Quantizer Index
    """
    The product quantizer does not stand out in any single aspect, but it
    balances the tradeoffs well. It is widely used in real applications in
    combination with other indexes such as IVF or HNSW.
    """
    M = 16     # number of sub-vector segments
    nbits = 8  # bits per sub-quantizer code
    # build:  CPU times: user 46.7 s, sys: 22.3 ms, total: 46.7 s  Wall time: 1.36 s
    # search: CPU times: user 1min 37s, sys: 106 ms, total: 1min 37s  Wall time: 2.8 s
    # observed Recall: 0.630898999999999
    evaluate("Product Quantizer Index", faiss.IndexPQ(d, M, nbits, metric))