{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 多层异构检索架构的实现\n",
    "Multi-layer Heterogeneous Vector Indexer (MHVI)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import time\n",
    "import os\n",
    "import struct\n",
    "import faiss\n",
    "import subprocess\n",
    "import matplotlib.pyplot as plt\n",
    "import json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "dataset_root_path: /home/ljl/Code/dataset/vector-ssd/\n",
      "dataset_path: /home/ljl/Code/dataset/vector-ssd//bigann/bigann_1M.bbin\n",
      "query_path: /home/ljl/Code/dataset/vector-ssd//bigann/bigann_query.bbin\n",
      "gt_path: /home/ljl/Code/dataset/vector-ssd//bigann/bigann_1M_gt.bin\n",
      "output_folder: bigann_1M_annlite_c16000_re2_pq64_R30/\n"
     ]
    }
   ],
   "source": [
    "# 索引结构\n",
    "build_mode_list = [\"annlite\", \"diskann\", \"spann\", \"aisaq\"]\n",
    "build_mode_index = 0 #选择第几个索引结构\n",
    "build_mode = build_mode_list[build_mode_index]\n",
    "\n",
    "# annlite就是三层结构\n",
    "# diskann就一层，L2采用node_type=1对原始数据集建图，然后保存pq表和pq向量以及图\n",
    "# spann是二层结构，L1采用node_type=2，L2没有，L3与annlite相同\n",
    "# aisaq也是单层结构，L2采用node_type=3对原始数据集建图\n",
    "\n",
    "# 数据集参数和索引参数等\n",
    "written = True #是否写入文件。避免测试时覆盖已写入文件\n",
    "evaluation = True #是否测试聚类精度。设置为False则是只训练聚类并导出\n",
    "# 写入文件时的对齐粒度，仅对图有效，因为L3无需对齐\n",
    "alignment_size = 4096\n",
    "\n",
    "dataset_name = [\"sift1m\", \"bigann1m\", \"deep1m\", \"gist1m\"][1]\n",
    "dataset_path = None\n",
    "query_path = None\n",
    "gt_path = None\n",
    "type = None\n",
    "#设置文件路径.注意所有向量数据集都转化为bin格式的，即头4个字节表示向量数量、紧接着4个表示每个向量的维数，然后是每个向量。里面不包含每个特征的大小，通过程序参数指定\n",
    "#为了便于在不同的机器上测试，把各个路径都放在一个列表中，通过遍历目录是否存在来找到合适的路径\n",
    "dataset_root_path_list = [\n",
    "    \"/home/gary/Code/MacCode/dataset/vector-ssd/\", #mac docker\n",
    "    \"/home/ljl/Code/dataset/vector-ssd/\", #185 docker\n",
    "    \"/mnt/nvme/ljl/Code/dataset/vector-ssd/\" #164\n",
    "]\n",
    "dataset_root_path = None\n",
    "for line in dataset_root_path_list:\n",
    "    if os.path.exists(line):\n",
    "        dataset_root_path = line\n",
    "        break\n",
    "print(\"dataset_root_path:\",dataset_root_path)\n",
    "if dataset_name == \"sift1m\":\n",
    "    type = np.float32\n",
    "    dataset_path = dataset_root_path+\"/sift1m/sift_1m_base.fbin\"\n",
    "    query_path = dataset_root_path+\"/sift1m/sift_query.fbin\"\n",
    "    gt_path = dataset_root_path+\"/sift1m/sift_1m_groundtruth.fbin\"\n",
    "elif dataset_name == \"bigann1m\":\n",
    "    type = np.uint8\n",
    "    dataset_path = dataset_root_path+\"/bigann/bigann_1M.bbin\"\n",
    "    query_path = dataset_root_path+\"/bigann/bigann_query.bbin\"\n",
    "    gt_path = dataset_root_path+\"/bigann/bigann_1M_gt.bin\"\n",
    "elif dataset_name == \"deep1m\":\n",
    "    type = np.float32\n",
    "    dataset_path = dataset_root_path+\"/deep/deep_1M.fbin\"\n",
    "    query_path = dataset_root_path+\"/deep/queries.fbin\"\n",
    "    gt_path = dataset_root_path+\"/deep/deep_1M_gt.bin\"\n",
    "elif dataset_name == \"gist1m\":\n",
    "    type = np.float32\n",
    "    alignment_size = 4096*4 #粒度小了无法存储\n",
    "    dataset_path = dataset_root_path+\"/gist/gist_base.bin\"\n",
    "    query_path = dataset_root_path+\"/gist/gist_query.bin\"\n",
    "    gt_path = dataset_root_path+\"/gist/gist_groundtruth.bin\"\n",
    "\n",
    "print(\"dataset_path:\", dataset_path)\n",
    "print(\"query_path:\", query_path)\n",
    "print(\"gt_path:\", gt_path)\n",
    "\n",
    "# #bigann100M\n",
    "# dataset_path = \"/home/gary/Code/DiskANN/build/data/bigann/bigann_learn.bbin\"\n",
    "# query_path = \"/home/gary/Code/DiskANN/build/data/bigann/bigann_query.bbin\"\n",
    "# gt_path = \"/home/gary/Code/DiskANN/build/data/bigann/bigann_gt.ibin\"\n",
    "\n",
    "# 聚类参数\n",
    "cluster_count = 16000 #设置聚类数量\n",
    "train_ratio = 1 #设置训练比例，例如1是全部向量用来训练\n",
    "cluster_redundancy = 2 #设置冗余数量，即一个点会被分到距离他最近的多少个聚类里面\n",
    "\n",
    "# 查询参数。这个只在builder的evaluation时使用\n",
    "k = 10 #返回多少个近邻\n",
    "nprobe = 10 #查询多少个聚类\n",
    "max_query = 1000 #给前多少个查询计算gt文件\n",
    "gt_k = 100 #计算的gt文件里面包含多少个近邻\n",
    "\n",
    "# 建图时的参数\n",
    "l2_graph_R = 30  #平均邻居数，不能过小\n",
    "# l2_graph_node_type = 3 #后面再指定\n",
    "\n",
    "graph_type = [\"nsg\", \"vamana\"][0] #选择建图算法\n",
    "# 若选择diskann则需要指定下列参数\n",
    "diskann_executor_path = \"/home/ljl/Code/DiskANN/build/apps/build_disk_index\" #diskann可执行程序的路径\n",
    "diskann_build_L = 100  #构建时的候选列表长度,越长精度越高\n",
    "diskann_search_dram_budget = 0 #目标内存大小由PQ_bucket和向量数量计算得出\n",
    "diskann_build_dram_limit = 120 #构建时的可用内存（GB），影响建图速度和精度，系统允许下越大越好\n",
    "diskann_page_size = 4096 #DiskANN中一个页面的大小，仅用于读取DiskANN的参数\n",
    "\n",
    "# 创建PQ向量的参数\n",
    "pq_bucket = 64 #将向量分为多少个桶，需要能够整除向量维度\n",
    "pq_bit = 8 #用多少个bit来表示一个维度。目前只支持了8bit\n",
    "\n",
    "# 输出文件夹\n",
    "# 获取当前工作目录\n",
    "# output_folder = os.getcwd() + \"/\"\n",
    "output_folder = \"\"\n",
    "output_folder += dataset_path.split(\"/\")[-1].split(\".\")[0]\n",
    "\n",
    "if(build_mode == \"annlite\"):\n",
    "    output_folder += \"_annlite\"\n",
    "    output_folder += \"_c\" + str(cluster_count)\n",
    "    output_folder += \"_re\" + str(cluster_redundancy)\n",
    "elif(build_mode == \"diskann\"):\n",
    "    output_folder += \"_diskann\"\n",
    "elif(build_mode == \"spann\"):\n",
    "    output_folder += \"_spann\"\n",
    "    output_folder += \"_c\" + str(cluster_count)\n",
    "    output_folder += \"_re\" + str(cluster_redundancy)\n",
    "elif(build_mode == \"aisaq\"):\n",
    "    output_folder += \"_aisaq\"\n",
    "\n",
    "output_folder += \"_pq\" + str(pq_bucket)\n",
    "output_folder += \"_R\" + str(l2_graph_R)\n",
    "output_folder += \"/\"\n",
    "print(\"output_folder:\",output_folder)\n",
    "\n",
    "# 1.底层IVF索引路径\n",
    "# 聚类中心文件路径\n",
    "cluster_center_path = output_folder+\"cluster_centers.bin\" #聚类中心点, 用于DiskANN建图用\n",
    "# 底层的偏移量文件路径\n",
    "offset_list_path = output_folder+\"offset_list.bin\" #偏移量数组，用于在last_layer中查询该聚类的偏移量。可以将长度固定来避免索引开销\n",
    "# 底层的聚类文件\n",
    "last_layer_path = output_folder+\"last_layer.bin\" #存储每个聚类内的向量\n",
    "\n",
    "# 2.中间层索引路径\n",
    "pq_vector_path = output_folder+\"centroid_pq_vector.bin\" #存储聚类中心向量的PQ编码向量\n",
    "pq_table_path = output_folder+\"centroid_pq_table.bin\" #存储聚类中心向量的PQ编码表\n",
    "pq_centorid_path = output_folder+\"centroid_pq_centroid.bin\" #存储PQ聚类中心点，里面只有一个向量。如果不是diskann建图的话里面为全0\n",
    "l2_graph_root_path = output_folder+\"L2_graph.bin\" #这个只是基础路径，根据不同的图节点类型在后面会加上.type0等后缀\n",
    "l2_gt_path = output_folder+\"L2_graph_gt.bin\"\n",
    "diskann_output_path = output_folder+\"diskann_output/\"\n",
    "#可以在查询器中的获取聚类中心分为2个方式，保持接口相同，即输入聚类id获取聚类中心向量，\n",
    "#一种是直接从cluster_center_path中读取向量\n",
    "#一种是使用PQ量化器读取向量\n",
    "#两种分别实现后便于实验对比，使用量化器后精度损失了多少\n",
    "\n",
    "# 3.顶层索引\n",
    "l1_graph_path = output_folder+\"L1_graph.bin\" #存储顶层图的邻接表\n",
    "l1_gt_path = output_folder+\"L1_graph_gt.bin\"\n",
    "l1_graph_vector_path = output_folder+\"L1_graph_vector.bin\" #存储顶层图的向量,与l2的向量有重复的部分，后续可以优化掉\n",
    "l1_l2_graph_node_mapping_path = output_folder+\"L1_L2_graph_node_mapping.bin\" #存储L1图的节点到L2图的节点映射关系\n",
    "l1_graph_sample_ratio = 0.1 #设置采样比例，例如0.1表示每个聚类只保留10%的节点. 如果采样模式是ncs，则表示L1的总节点数量\n",
    "l1_simpling_method = [\"random\", \"ncs\"][1] #random: 纯随机抽样建图；ncs:Neighbor-Capturing Sampling, 邻居捕获采样.随机采样一小部分点然后采样这些点的邻居.\n",
    "l1_ncs_threshold = 1 #当被采样到的节点的邻居，这些点的邻居也被采样到达到该比例后同样也被标记为不用访问\n",
    "l1_cachelist_path = output_folder+\"L1_cache_list.bin\" #标记l1的哪些点是完全缓存的，l2可以不用重新访问了\n",
    "l1_graph_R = l2_graph_R #现阶段和L2图的R保持一致，后续可以优化\n",
    "# l1_graph_node_type = 2 #这里的node_type与L2的语义是一致的。1表示把原始向量接入图中，2表示接入PQ向量; 2是默认，代表annlite\n",
    "\n",
    "#配置文件路径，用于给searcher传递参数\n",
    "config_path = output_folder+\"dataset_config.json\"\n",
    "user_config_path = output_folder+\"user_config.json\" "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "build_mode: annlite\n"
     ]
    }
   ],
   "source": [
    "# Re-set the graph node-type parameters according to the chosen build mode\n",
    "if(build_mode == \"annlite\"):\n",
    "    print(\"build_mode: annlite\")\n",
    "    l1_graph_node_type = 2\n",
    "    l2_graph_node_type = 4\n",
    "elif(build_mode == \"diskann\"):\n",
    "    print(\"build_mode: diskann\")\n",
    "    l1_graph_node_type = None\n",
    "    l2_graph_node_type = 1\n",
    "elif(build_mode == \"spann\"):\n",
    "    print(\"build_mode: spann\")\n",
    "    l1_graph_node_type = None\n",
    "    l2_graph_node_type = 2 # no L1; the L2 graph stays memory-resident and embeds the vectors\n",
    "elif(build_mode == \"aisaq\"):\n",
    "    print(\"build_mode: aisaq\")\n",
    "    l1_graph_node_type = None\n",
    "    l2_graph_node_type = 4\n",
    "    pq_bucket = 4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the output folder if it does not exist yet\n",
    "if not os.path.exists(output_folder):\n",
    "    os.makedirs(output_folder)\n",
    "\n",
    "# Verify the input files exist. The original code called an undefined\n",
    "# pause(), which only halted the cell via a NameError; raise a clear,\n",
    "# descriptive error instead.\n",
    "if not os.path.exists(dataset_path):\n",
    "    raise FileNotFoundError(f\"dataset_path: {dataset_path} not exists\")\n",
    "if evaluation and not os.path.exists(query_path):\n",
    "    raise FileNotFoundError(f\"query_path: {query_path} not exists\")\n",
    "if evaluation and not os.path.exists(gt_path):\n",
    "    raise FileNotFoundError(f\"gt_path: {gt_path} not exists\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {},
   "outputs": [],
   "source": [
    "#读取bin格式的数据集文件，数据集、查询集、GT集都可以使用这个函数读取\n",
    "def read_bin(file_path, type):\n",
    "    file_size = os.path.getsize(file_path)\n",
    "    print(\"Read data from\", file_path, \"\\nfile size:\",file_size)\n",
    "    feature_size = np.dtype(type).itemsize\n",
    "    with open(file_path,\"rb\") as fd:\n",
    "        lines = int.from_bytes(fd.read(4), byteorder='little')\n",
    "        dim = int.from_bytes(fd.read(4), byteorder='little')\n",
    "        print(\"lines:\",lines,\"dim:\",dim)\n",
    "\n",
    "        data_size = lines * dim * feature_size\n",
    "        if(data_size+8 != file_size):\n",
    "            print(f\"Error! file size {file_size} and argument {data_size+8} not match!\") # 判断实际文件大小是否与参数匹配，简单的纠错机制\n",
    "            return None\n",
    "\n",
    "        binary_data = fd.read(data_size)\n",
    "        vectors = np.frombuffer(binary_data, dtype=type)\n",
    "        vectors = vectors.reshape(lines, dim)\n",
    "        print(\"Returned vector list:\",vectors.shape, vectors.dtype)\n",
    "\n",
    "        return vectors\n",
    "    \n",
    "#计算Recall，I是搜索结果，gts是ground truth，k是计算前多少个搜索结果的召回率\n",
    "def compute_recall(I, gts, k):\n",
    "    num_queries = I.shape[0]\n",
    "    recall_sum = 0\n",
    "    \n",
    "    for i in range(num_queries):\n",
    "        retrieved = set(I[i])\n",
    "        ground_truth = set(gts[i][:k])  # 取前k个真实结果\n",
    "        correct = len(retrieved.intersection(ground_truth))\n",
    "        recall_sum += correct / k\n",
    "    \n",
    "    return recall_sum / num_queries\n",
    "\n",
    "\n",
    "#将向量集以.bin的格式写入文件中，主要用于写入质心\n",
    "def write_bin(filename, array):\n",
    "    # 检查输入是否为numpy数组\n",
    "    if not isinstance(array, np.ndarray):\n",
    "        raise ValueError(\"Input must be a NumPy array.\")\n",
    "    \n",
    "    # 获取数组的形状（行数和列数）\n",
    "    rows, cols = array.shape\n",
    "    \n",
    "    # 打开文件准备写入\n",
    "    with open(filename, 'wb') as f:\n",
    "        # 将行数和列数写入文件的前8个字节\n",
    "        np.array(rows, dtype=np.uint32).tofile(f)\n",
    "        np.array(cols, dtype=np.uint32).tofile(f)\n",
    "        \n",
    "        # 写入数组数据\n",
    "        # 确保数据类型正确，这里假设使用float32\n",
    "        array.tofile(f)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "#计算查询集在dataset中的ground truth文件，用于计算每层的召回率\n",
    "def compute_gt(dataset, query_set, gt_file_path, k):\n",
    "    print(\"Computing ground truth on dataset including\", dataset.shape[0], \"vectors for\", query_set.shape[0], \"queries and\", k, \"ground truths per query\")\n",
    "    #记录起始时间\n",
    "    start_time = time.time()\n",
    "    \n",
    "    gt_array = np.zeros((query_set.shape[0], k), dtype=np.uint32)\n",
    "    for i in range(query_set.shape[0]):\n",
    "        if(i % 1000 == 0):\n",
    "            print(\"Computed ground truth for query\", i)\n",
    "        query = query_set[i]\n",
    "        distances = np.linalg.norm(dataset - query, axis=1)\n",
    "        indices = np.argsort(distances)[:k]\n",
    "        gt_array[i] = indices\n",
    "    \n",
    "    with open(gt_file_path, 'wb') as f:\n",
    "        np.array(gt_array.shape[0], dtype=np.uint32).tofile(f)\n",
    "        np.array(gt_array.shape[1], dtype=np.uint32).tofile(f)\n",
    "        gt_array.tofile(f)\n",
    "\n",
    "    print(\"Ground truth file saved to\", gt_file_path)\n",
    "    end_time = time.time()\n",
    "    print(f\"Time taken: {end_time - start_time} seconds\")\n",
    "\n",
    "from concurrent.futures import ThreadPoolExecutor\n",
    "import threading\n",
    "def compute_gt_multithread(dataset, query_set, gt_file_path, k, num_threads=16):\n",
    "    \"\"\"\n",
    "    计算查询集在dataset中的ground truth文件，用于计算每层的召回率\n",
    "    \n",
    "    参数:\n",
    "        dataset: 数据集向量\n",
    "        query_set: 查询集向量\n",
    "        gt_file_path: 保存ground truth的文件路径\n",
    "        k: 每个查询需要的最近邻数量\n",
    "        num_threads: 使用的线程数，默认为CPU核心数\n",
    "    \"\"\"\n",
    "    print(f\"Computing ground truth on dataset including {dataset.shape[0]} vectors for {query_set.shape[0]} queries and {k} ground truths per query\")\n",
    "    #记录起始时间\n",
    "    start_time = time.time()\n",
    "\n",
    "    gt_array = np.zeros((query_set.shape[0], k), dtype=np.uint32)\n",
    "    \n",
    "    # 每个线程处理的查询数量\n",
    "    queries_per_thread = query_set.shape[0] // num_threads\n",
    "    if queries_per_thread == 0:\n",
    "        queries_per_thread = 1\n",
    "        num_threads = min(num_threads, query_set.shape[0])\n",
    "    \n",
    "    # 共享计数器和锁\n",
    "    processed_count = 0\n",
    "    count_lock = threading.Lock()\n",
    "\n",
    "    # 定义线程处理函数\n",
    "    def process_queries(start_idx, end_idx):\n",
    "        nonlocal processed_count\n",
    "        for i in range(start_idx, end_idx):\n",
    "            query = query_set[i]\n",
    "            diff = dataset.astype('float32') - query #直接以uint8进行计算的话会出错\n",
    "            distances = np.linalg.norm(diff, axis=1)\n",
    "            indices = np.argsort(distances)[:k]\n",
    "            gt_array[i] = indices\n",
    "            \n",
    "            # 更新计数器并检查是否需要打印进度\n",
    "            with count_lock:\n",
    "                processed_count += 1\n",
    "                if processed_count % 1000 == 0:\n",
    "                    print(f\"Computed ground truth for {processed_count} queries\")\n",
    "    \n",
    "    # 创建并执行线程池\n",
    "    with ThreadPoolExecutor(max_workers=num_threads) as executor:\n",
    "        futures = []\n",
    "        for i in range(num_threads):\n",
    "            start_idx = i * queries_per_thread\n",
    "            end_idx = start_idx + queries_per_thread\n",
    "            if i == num_threads - 1:\n",
    "                end_idx = query_set.shape[0]  # 最后一个线程处理剩余的查询\n",
    "            \n",
    "            if start_idx < end_idx:\n",
    "                futures.append(executor.submit(process_queries, start_idx, end_idx))\n",
    "        \n",
    "        # 等待所有线程完成\n",
    "        for future in futures:\n",
    "            future.result()\n",
    "    \n",
    "    # 保存结果\n",
    "    with open(gt_file_path, 'wb') as f:\n",
    "        np.array(gt_array.shape[0], dtype=np.uint32).tofile(f)\n",
    "        np.array(gt_array.shape[1], dtype=np.uint32).tofile(f)\n",
    "        gt_array.tofile(f)\n",
    "    \n",
    "    print(f\"Ground truth file saved to {gt_file_path}\")    \n",
    "    end_time = time.time()\n",
    "    print(f\"Time taken: {end_time - start_time} seconds\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Read data from /home/ljl/Code/dataset/vector-ssd//bigann/bigann_1M.bbin \n",
      "file size: 128000008\n",
      "lines: 1000000 dim: 128\n",
      "Returned vector list: (1000000, 128) uint8\n",
      "[[  0   0   0 ...  14  10   6]\n",
      " [ 65  35   8 ...   0   0   0]\n",
      " [  0   0   0 ...   1   0   0]\n",
      " ...\n",
      " [  0   0   1 ...   0   0   2]\n",
      " [ 21 103   1 ...  37   2   0]\n",
      " [  0   2   0 ...   4   2   7]]\n",
      "Read data from /home/ljl/Code/dataset/vector-ssd//bigann/bigann_query.bbin \n",
      "file size: 1280008\n",
      "lines: 10000 dim: 128\n",
      "Returned vector list: (10000, 128) uint8\n",
      "[[  3   9  17 ...  15  12  27]\n",
      " [  1   2   8 ...   0   0  10]\n",
      " [141  40   0 ...   8   7  30]\n",
      " ...\n",
      " [ 37  24   5 ...  50   8   0]\n",
      " [ 12   0   2 ...   6   1   3]\n",
      " [ 14  11  13 ...  39  16   0]]\n",
      "Read data from /home/ljl/Code/dataset/vector-ssd//bigann/bigann_1M_gt.bin \n",
      "file size: 4000008\n",
      "lines: 10000 dim: 100\n",
      "Returned vector list: (10000, 100) uint32\n",
      "[[504814 344333 900184 ... 296269 648063 292756]\n",
      " [588616 528450 346239 ... 818916  11776 824573]\n",
      " [552515 869848 461275 ... 938718 390646 645167]\n",
      " ...\n",
      " [271339 929611 406152 ... 928933  27184 955256]\n",
      " [266450 225643 420764 ...  81608 981736  81649]\n",
      " [511237 333389  86112 ... 908944 318717 278013]]\n"
     ]
    }
   ],
   "source": [
    "# Load the dataset (and, when evaluating, the queries and ground truth)\n",
    "dataset = read_bin(dataset_path, type)\n",
    "print(dataset)\n",
    "\n",
    "queries = None\n",
    "gts = None\n",
    "if(evaluation):\n",
    "    queries = read_bin(query_path, type)\n",
    "    print(queries)\n",
    "\n",
    "    gts = read_bin(gt_path, np.uint32)\n",
    "    print(gts)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 91,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Start train\n",
      "Train done, spend: 17.7 second\n",
      "Start add vector\n",
      "Add done, spend: 2.1 second\n",
      "Writing index\n",
      "Write done\n"
     ]
    }
   ],
   "source": [
    "# Build the bottom-layer IVF index with faiss's IVF library.\n",
    "# Fixed: the factory string mixed an f-string prefix with %-formatting.\n",
    "IVF_index = faiss.index_factory(dataset.shape[1], f\"IVF{cluster_count},Flat\")  # create an IVF index\n",
    "\n",
    "if(build_mode == \"annlite\" or build_mode == \"spann\"):\n",
    "    # train the index\n",
    "    print(\"Start train\")\n",
    "    st = time.time()\n",
    "    step = int(1/train_ratio) # stride: take one training vector every `step` entries\n",
    "    IVF_index.train(dataset[::step])\n",
    "    et = time.time()\n",
    "    print(\"Train done, spend: %.1f second\"%(et-st))\n",
    "\n",
    "    # add the vectors to the index\n",
    "    print(\"Start add vector\")\n",
    "    st = time.time()\n",
    "    IVF_index.add(dataset)\n",
    "    et = time.time()\n",
    "    print(\"Add done, spend: %.1f second\"%(et-st))\n",
    "\n",
    "    # save the index\n",
    "    # Fixed: use a dedicated name for the file stem instead of clobbering the\n",
    "    # global `dataset_name` selector defined in the configuration cell.\n",
    "    dataset_file_stem = dataset_path.split(\"/\")[-1].split(\".\")[0]\n",
    "    if(written):\n",
    "        print(\"Writing index\")\n",
    "        # faiss.write_index(IVF_index, output_folder+\"ivf_index_%s_%d.faiss\"%(dataset_file_stem,cluster_count)) # optional; this output is rarely needed\n",
    "        print(\"Write done\")\n",
    "    else:\n",
    "        print(\"Not write index\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.21950999999999335\n",
      "0.3336899999999914\n",
      "0.46840999999999877\n",
      "0.6087099999999984\n",
      "0.7388599999999921\n",
      "0.8436599999999815\n"
     ]
    }
   ],
   "source": [
    "# Recall test of the bottom (third) layer; also shows how the number of\n",
    "# probed clusters affects recall.\n",
    "if(build_mode == \"annlite\" or build_mode == \"spann\"):\n",
    "    if(evaluation):\n",
    "        for i in [1,2,4,8,16,32]: # number of clusters to probe\n",
    "            IVF_index.nprobe = i\n",
    "            D, I = IVF_index.search(queries, k)  # run the actual index search\n",
    "            # square-root the distances, if ever needed\n",
    "            # D = np.sqrt(D)\n",
    "            print(compute_recall(I, gts, k))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 93,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[47.294117  23.441177  23.735294  ... 27.088236  27.058825  21.941177 ]\n",
      " [27.774193  19.806452  11.564516  ... 23.677418  16.016129  22.306452 ]\n",
      " [14.185185  14.185185  25.87037   ... 11.462963   3.1666667  6.2777777]\n",
      " ...\n",
      " [17.768421   9.757895  12.884212  ... 11.063158  16.421053  51.3579   ]\n",
      " [22.415094  24.943398  25.452831  ...  5.3773584  6.264151  17.528301 ]\n",
      " [29.4       15.179999   8.639999  ... 16.64      16.66      17.82     ]]\n",
      "(16000, 128)\n",
      "float32\n",
      "cluster center has written to: bigann_1M_annlite_c16000_re2_pq64_R30/cluster_centers.bin\n"
     ]
    }
   ],
   "source": [
    "# Fetch the quantizer, used to extract the centroids\n",
    "quantizer = IVF_index.quantizer\n",
    "centroids = np.zeros((cluster_count, dataset.shape[1]), dtype=np.float32)\n",
    "if(build_mode == \"annlite\" or build_mode == \"spann\"):\n",
    "    quantizer.reconstruct_n(0, cluster_count, centroids)\n",
    "\n",
    "    print(centroids) # centroids are virtual cluster centers, not actual dataset points\n",
    "    print(centroids.shape)\n",
    "    print(centroids.dtype)\n",
    "\n",
    "    # persist the centroids\n",
    "    if(written):\n",
    "        write_bin(cluster_center_path, centroids)\n",
    "        print(\"cluster center has written to:\",cluster_center_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [],
   "source": [
    "#获取向量所属的聚类ID\n",
    "# def get_cluster_id_for_vector(vector, quantizer):\n",
    "#     # 使用 search 方法获取向量所属的聚类 ID\n",
    "#     _, cluster_id = quantizer.search(np.expand_dims(vector, axis=0), 1) #第二个参数为返回个数\n",
    "#     return cluster_id[0][0]\n",
    "\n",
    "#批量获取每个向量所属的聚类ID\n",
    "def get_cluster_id_for_vectors(vectors, quantizer, retrieve_k):\n",
    "    # 使用 search 方法获取向量所属的聚类ID\n",
    "    _, cluster_ids = quantizer.search(vectors, retrieve_k)\n",
    "    #将cluster_ids转化为一个一维数组\n",
    "    # cluster_ids = cluster_ids.flatten()\n",
    "    return cluster_ids\n",
    "\n",
    "# Tally cluster assignments over all dataset vectors. Returns a list whose\n",
    "# length equals the (global) cluster_count, holding each cluster's size.\n",
    "def statis_cluster(vectors, quantizer, retrieve_k):\n",
    "    \"\"\"Count assignments per cluster; returns (counts, max(counts), min(counts)).\"\"\"\n",
    "    assignments = get_cluster_id_for_vectors(vectors, quantizer, retrieve_k)  # shape (n, retrieve_k)\n",
    "    counts = [0] * cluster_count  # relies on the global cluster_count\n",
    "    for cid in assignments.ravel():\n",
    "        counts[cid] += 1\n",
    "    return counts, max(counts), min(counts)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "聚类元素数量最大值： 659 最小值： 1\n"
     ]
    }
   ],
   "source": [
    "if(build_mode == \"annlite\" or build_mode == \"spann\"):\n",
    "    # ids = get_cluster_id_for_vectors(dataset,quantizer)\n",
    "    statis_result, max_value, min_value = statis_cluster(dataset, quantizer, cluster_redundancy)\n",
    "\n",
    "    # statis_result is a 1-D list of per-cluster occurrence counts; a\n",
    "    # histogram of it could be plotted like this:\n",
    "    # plt.hist(statis_result, bins=100)\n",
    "    # plt.show()\n",
    "\n",
    "    # report the largest and smallest cluster sizes\n",
    "    print(\"聚类元素数量最大值：\", max_value, \"最小值：\", min_value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[      0      67     172 ... 1999805 1999891 2000000]\n",
      "Write offset list to file\n",
      "Write last layer to file\n"
     ]
    }
   ],
   "source": [
    "if(build_mode == \"annlite\" or build_mode == \"spann\"):\n",
    "    # cluster id(s) of every dataset vector (with redundancy)\n",
    "    cluster_ids_of_dataset = get_cluster_id_for_vectors(dataset,quantizer,cluster_redundancy)\n",
    "\n",
    "    # bucket the vector ids by their assigned cluster centers\n",
    "    clustered_dataset = [[] for i in range(cluster_count)]\n",
    "\n",
    "    for node_id in range(cluster_ids_of_dataset.shape[0]):\n",
    "        for cid_index in range(cluster_ids_of_dataset.shape[1]):\n",
    "            clustered_dataset[cluster_ids_of_dataset[node_id][cid_index]].append(node_id)\n",
    "\n",
    "    # per-cluster element counts\n",
    "    clustered_dataset_statis = np.array([len(i) for i in clustered_dataset])\n",
    "\n",
    "    # prefix-sum offsets. Fixed: a single cumsum call replaces the original\n",
    "    # O(cluster_count^2) loop of partial np.sum calls.\n",
    "    offset_list = np.zeros(shape=(cluster_count+1,),dtype=np.int32)\n",
    "    offset_list[1:] = np.cumsum(clustered_dataset_statis)\n",
    "    offset_list[-1] = cluster_ids_of_dataset.size\n",
    "\n",
    "    print(offset_list[:])\n",
    "    # write the offset array in .bin layout (count, 1, payload)\n",
    "    with open(offset_list_path,'wb') as f:\n",
    "        f.write(struct.pack('i',offset_list.shape[0]))\n",
    "        f.write(struct.pack('i',1))\n",
    "        f.write(offset_list.tobytes())\n",
    "    print(\"Write offset list to file\")\n",
    "\n",
    "    # write each cluster's vectors; every vector is prefixed with its 4-byte\n",
    "    # original id (clustering shuffles the original order)\n",
    "    with open(last_layer_path,'wb') as f:\n",
    "        for line in clustered_dataset:\n",
    "            for id in line:\n",
    "                f.write(struct.pack('i',id))\n",
    "                f.write(dataset[id].tobytes())\n",
    "    print(\"Write last layer to file\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PQ_index train start, shape: (16000, 128)\n",
      "PQ_index train done\n",
      "PQ_index add done\n",
      "PQ 压缩向量 pq_codes（NumPy 数组形式）：\n",
      "(16000, 64) uint8\n",
      "PQ表 pq_table（NumPy 数组形式）：\n",
      "(64, 256, 2) float32\n"
     ]
    }
   ],
   "source": [
    "# Build the PQ vectors. With nsg graphs faiss is used here; with vamana the\n",
    "# PQ vectors are read from a file after the graph has been built.\n",
    "pq_codes = None\n",
    "if(graph_type == \"nsg\"):\n",
    "    PQ_index = faiss.IndexPQ(dataset.shape[1], pq_bucket, pq_bit)\n",
    "    pq_dataset = None\n",
    "    # annlite/spann PQ-compress the cluster centroids; DiskANN/AiSAQ build the\n",
    "    # graph on the raw vectors and PQ-compress those instead\n",
    "    if(build_mode == \"annlite\" or build_mode == \"spann\"):\n",
    "        pq_dataset = centroids\n",
    "    elif(build_mode == \"diskann\" or build_mode == \"aisaq\"):\n",
    "        pq_dataset = dataset\n",
    "    print(\"PQ_index train start, shape:\",pq_dataset.shape)\n",
    "    # PQ-compress the chosen vectors\n",
    "    PQ_index.train(pq_dataset)\n",
    "    print(\"PQ_index train done\")\n",
    "    PQ_index.add(pq_dataset)\n",
    "    print(\"PQ_index add done\")\n",
    "\n",
    "    # Extract the PQ codes as a NumPy array. faiss packs each compressed\n",
    "    # vector into uint8 storage; total length = n_vectors * pq_bit/8 * buckets.\n",
    "    pq_codes = faiss.vector_to_array(PQ_index.codes)\n",
    "    pq_codes = pq_codes.reshape(pq_dataset.shape[0], pq_bucket)\n",
    "    print(\"PQ 压缩向量 pq_codes（NumPy 数组形式）：\")\n",
    "    print(pq_codes.shape, pq_codes.dtype)\n",
    "\n",
    "    pq_table = PQ_index.pq \n",
    "    pq_table = faiss.vector_to_array(pq_table.centroids)\n",
    "    pq_table = pq_table.reshape(pq_bucket, 2**pq_bit, -1)\n",
    "    print(\"PQ表 pq_table（NumPy 数组形式）：\")\n",
    "    print(pq_table.shape, pq_table.dtype)\n",
    "    pq_table = pq_table.reshape(pq_bucket, -1)\n",
    "\n",
    "    # write the PQ table and the PQ-compressed vectors\n",
    "    write_bin(pq_vector_path, pq_codes)\n",
    "    write_bin(pq_table_path, pq_table)\n",
    "\n",
    "    # Blank PQ centroid, kept for compatibility with DiskANN's PQ decompression.\n",
    "    # (dtype after clustering uint8 data is unclear, so float32 is used for now)\n",
    "    pq_centorid = np.zeros((1,dataset.shape[1]),dtype=np.float32)\n",
    "    write_bin(pq_centorid_path, pq_centorid)\n",
    "    \n",
    "# With a vamana graph the PQ vectors can only be obtained after graph build"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {},
   "outputs": [],
   "source": [
    "#定义图结构，用于存储图以及写入图\n",
    "#图的结构用一个np数组表示，行数表示节点数，每行的列数固定，不足的部分填0\n",
    "class Graph:\n",
    "    def __init__(self, n, max_neighbor):\n",
    "        #值初始化为-1\n",
    "        self.graph = np.full((n, max_neighbor), -1, dtype=np.int32)        \n",
    "\n",
    "        self.n = n\n",
    "        self.max_neighbor = max_neighbor\n",
    "        self.neighbor_table_size = self.max_neighbor * 4\n",
    "        self.vector_size = 0 #默认是纯图的类型，type为0\n",
    "        self.node_type = 0\n",
    "        self.vectors = None\n",
    "        self.raw_vectors = None #node_type为4时使用。用于在后面再追加一个原始向量\n",
    "\n",
    "    def update_node_type(self, vectors, type, raw_vectors=None):\n",
    "        self.node_type = type\n",
    "        self.vectors = vectors #存储节点对应的向量信息，如果是None则表示没有传入节点信息\n",
    "        if self.vectors is not None: #检查传入的向量行数是否正确\n",
    "            assert self.vectors.shape[0] == self.n\n",
    "            print(f'Update graph vector shape: {self.vectors.shape} in type {self.vectors.dtype}')\n",
    "            self.vectors = self.vectors.reshape((self.n, -1))\n",
    "            self.vector_size = self.vectors.shape[1] * self.vectors.itemsize\n",
    "        \n",
    "        if self.node_type == 4: #如果节点类型为4，则需要额外存储原始向量\n",
    "            self.raw_vectors = raw_vectors\n",
    "            self.node_size = self.neighbor_table_size + self.vector_size + self.raw_vectors.shape[1] * self.raw_vectors.itemsize\n",
    "            print(f\"Updated graph node size(B): {self.node_size}, neighbor_table_size(B): {self.neighbor_table_size}, vector_size(B): {self.vector_size}, raw_vectors_size(B): {self.raw_vectors.shape[1] * self.raw_vectors.itemsize}\")\n",
    "        else:\n",
    "            self.node_size = self.neighbor_table_size + self.vector_size\n",
    "            print(f\"Updated graph node size(B): {self.node_size}, neighbor_table_size(B): {self.neighbor_table_size}, vector_size(B): {self.vector_size}\")\n",
    "\n",
    "    #构建AiSAQ的结构\n",
    "    def build_AiSAQ(self, pq_codes):\n",
    "        neibor_pq_codes = np.zeros((self.n, self.max_neighbor, pq_codes.shape[1]), dtype=pq_codes.dtype)\n",
    "        for i in range(self.n):\n",
    "            for j in range(self.max_neighbor):\n",
    "                if self.graph[i][j] != -1:\n",
    "                    neibor_pq_codes[i][j] = pq_codes[self.graph[i][j]]\n",
    "        return neibor_pq_codes\n",
    "\n",
    "    # Print the adjacency matrix. numpy abbreviates the output by default;\n",
    "    # pass print_all=True to dump the whole matrix.\n",
    "    def print(self, print_all=False):\n",
    "        limit = np.inf if print_all else 3\n",
    "        np.set_printoptions(threshold=limit)\n",
    "        print(f'graph shape: {self.graph.shape}')\n",
    "        print(self.graph)\n",
    "        np.set_printoptions(threshold=3) # restore the abbreviated default\n",
    "\n",
    "    def save_as_bin(self, path, alignment=4096): # alignment: write-time page granularity; nodes never straddle a page\n",
    "        # Serialize the graph to a paged binary file: page 0 holds only the header,\n",
    "        # every following page holds up to node_per_page complete nodes\n",
    "        # (neighbor table [+ attached vector] [+ raw vector]).\n",
    "        node_per_page = alignment // self.node_size\n",
    "        print(f\"Alignment size: {alignment}, self.node_size: {self.node_size}, node per page: {node_per_page}\")\n",
    "        if node_per_page <= 0:\n",
    "            raise ValueError(\"Alignment size must be greater than node size\")\n",
    "\n",
    "        with open(path, 'wb') as f:\n",
    "            # Header: counts and per-section sizes, all little-endian uint32.\n",
    "            f.write(struct.pack('I', self.n))\n",
    "            f.write(struct.pack('I', self.max_neighbor))\n",
    "            f.write(struct.pack('I', self.node_type))\n",
    "            f.write(struct.pack('I', node_per_page))\n",
    "            f.write(struct.pack('I', self.neighbor_table_size))\n",
    "            f.write(struct.pack('I', self.vector_size))\n",
    "            if self.raw_vectors is not None:\n",
    "                f.write(struct.pack('I', self.raw_vectors.shape[1] * self.raw_vectors.itemsize)) # byte size of one raw vector\n",
    "                # Also record the raw vectors' dtype: even for the same graph type the\n",
    "                # attached vectors can differ between index structures.\n",
    "                dtype_str = str(self.raw_vectors.dtype)  # e.g. 'float32'\n",
    "                dtype_bytes = dtype_str.encode('utf-8')  # e.g. b'float32' (6 bytes)\n",
    "                # Pad to exactly 8 bytes with NUL bytes; longer names are truncated.\n",
    "                fixed_bytes = dtype_bytes.ljust(8, b'\\x00')[:8]  # final length is always 8 bytes\n",
    "                f.write(fixed_bytes)  # fixed 8-byte dtype tag\n",
    "\n",
    "            f.seek(alignment) # the whole first page is reserved for the header\n",
    "\n",
    "            written_vectors = 0\n",
    "            for i in range(self.n):\n",
    "                f.write(self.graph[i].tobytes())\n",
    "                if self.vectors is not None:\n",
    "                    f.write(self.vectors[i].tobytes())\n",
    "                if self.raw_vectors is not None:\n",
    "                    f.write(self.raw_vectors[i].tobytes())\n",
    "\n",
    "                written_vectors += 1\n",
    "                if written_vectors >= node_per_page:\n",
    "                    # Page is full: seek to the start of the next aligned page\n",
    "                    # (+1 because page 0 is the header page).\n",
    "                    written_vectors = 0\n",
    "                    offset = alignment * ((i+1) // node_per_page + 1)\n",
    "                    # print(\"jump to\",offset)\n",
    "                    f.seek(offset)\n",
    "\n",
    "        print(f\"Graph saved to {path}\")\n",
    "\n",
    "    # Load the graph structure from a faiss-written index file\n",
    "    def load_from_faiss(self, path):\n",
    "        # faiss stores the adjacency lists starting at byte 83 as consecutive\n",
    "        # uint32 neighbor ids, each node's list terminated by the sentinel 0xFFFFFFFF.\n",
    "        # Read lists in a loop until all n nodes have been consumed.\n",
    "        # Returns the average neighbor count per node.\n",
    "        total_neighbors_count = 0\n",
    "        total_nodes = 0\n",
    "        with open(path, 'rb') as f:\n",
    "            f.seek(83)\n",
    "            for i in range(self.n):\n",
    "                neighbors = []\n",
    "                neighbors_count = 0\n",
    "                while True:\n",
    "                    neighbor = struct.unpack('I', f.read(4))[0]\n",
    "                    if neighbor == 0xFFFFFFFF:\n",
    "                        break\n",
    "                    neighbors.append(neighbor)\n",
    "                    neighbors_count += 1\n",
    "                self.graph[i][:neighbors_count] = neighbors[:neighbors_count]\n",
    "                total_neighbors_count += neighbors_count\n",
    "                total_nodes += 1\n",
    "        \n",
    "        print(f'total nodes: {total_nodes}, average neighbors: {total_neighbors_count/total_nodes}')\n",
    "        return total_neighbors_count/total_nodes\n",
    "\n",
    "    # Load the graph structure from a DiskANN-written index file.\n",
    "    # graph_vector_type is the dtype of the vectors embedded in the DiskANN graph;\n",
    "    # it may differ from the other node vector types and must be handled separately.\n",
    "    # Returns the average neighbor count per node.\n",
    "    def load_from_diskann(self, path, graph_vector_type):\n",
    "        print(\"load graph from diskann of path:\",path)\n",
    "        total_neighbors_count = 0\n",
    "        total_nodes = 0 # number of nodes actually read from the file\n",
    "        with open(path, 'rb') as f:\n",
    "            # Parse the header (little-endian uint64 fields)\n",
    "            f.seek(8,0)\n",
    "            total_nodes_in_head = struct.unpack('Q', f.read(8))[0]\n",
    "            if total_nodes_in_head != self.n:\n",
    "                # Fail fast: the previous input()+pause() halt relied on pause(),\n",
    "                # which was never defined and only stopped execution via a NameError.\n",
    "                raise ValueError(f\"Total nodes in head ({total_nodes_in_head}) is not equal to n ({self.n})\")\n",
    "            dim_in_head = struct.unpack('Q', f.read(8))[0]\n",
    "            vector_size_in_diskann = dim_in_head*graph_vector_type.itemsize\n",
    "            entry_point_id = struct.unpack('Q', f.read(8))[0]\n",
    "            node_size_in_diskann = struct.unpack('Q', f.read(8))[0]\n",
    "            node_count_per_page = struct.unpack('Q', f.read(8))[0] # nodes stored per page; needed while reading\n",
    "            f.seek(72,0)\n",
    "            file_size = struct.unpack('Q', f.read(8))[0]\n",
    "            print(f\"file size: {file_size}, node size: {node_size_in_diskann}, node count per page: {node_count_per_page}, vector_size_in_diskann: {vector_size_in_diskann}\")\n",
    "            \n",
    "            # Read the neighbor lists\n",
    "            current_page = 1 # index of the page currently being read (page 0 is the header)\n",
    "            # NOTE(review): diskann_page_size is a module-level constant defined elsewhere in the notebook\n",
    "            f.seek(current_page*diskann_page_size,0)\n",
    "            for i in range(self.n):\n",
    "                f.seek(vector_size_in_diskann, 1) # skip the embedded raw vector\n",
    "                neighbors_count = struct.unpack('I', f.read(4))[0]\n",
    "                neighbors = []\n",
    "                for n_index in range(neighbors_count):\n",
    "                    neighbor_id = struct.unpack('I', f.read(4))[0]\n",
    "                    neighbors.append(neighbor_id)\n",
    "                self.graph[i][:neighbors_count] = neighbors[:neighbors_count]\n",
    "                total_neighbors_count += neighbors_count\n",
    "\n",
    "                total_nodes += 1\n",
    "                if total_nodes%node_count_per_page == 0:\n",
    "                    current_page += 1\n",
    "                    f.seek(current_page*diskann_page_size,0)\n",
    "        \n",
    "        print(f'Load graph from diskann success, total nodes: {total_nodes}, average neighbors: {total_neighbors_count/total_nodes}')\n",
    "        return total_neighbors_count/total_nodes\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Computing ground truth on dataset including 16000 vectors for 10000 queries and 100 ground truths per query\n",
      "Computed ground truth for 1000 queries\n",
      "Computed ground truth for 2000 queries\n",
      "Computed ground truth for 3000 queries\n",
      "Computed ground truth for 4000 queries\n",
      "Computed ground truth for 5000 queries\n",
      "Computed ground truth for 6000 queries\n",
      "Computed ground truth for 7000 queries\n",
      "Computed ground truth for 8000 queries\n",
      "Computed ground truth for 9000 queries\n",
      "Computed ground truth for 10000 queries\n",
      "Ground truth file saved to bigann_1M_annlite_c16000_re2_pq64_R30/L2_graph_gt.bin\n",
      "Time taken: 6.221520662307739 seconds\n",
      "total nodes: 16000, average neighbors: 17.9755\n",
      "Building AiSAQ...\n",
      "Building AiSAQ Done.\n",
      "Update graph vector shape: (16000, 30, 64) in type uint8\n",
      "Updated graph node size(B): 2552, neighbor_table_size(B): 120, vector_size(B): 1920, raw_vectors_size(B): 512\n",
      "Alignment size: 4096, self.node_size: 2552, node per page: 1\n",
      "Graph saved to bigann_1M_annlite_c16000_re2_pq64_R30/L2_graph.bin\n"
     ]
    }
   ],
   "source": [
    "# Build the L2 graph\n",
    "# 1. Depending on the index type, build over the cluster centroids or the raw dataset\n",
    "l2_dataset = None\n",
    "l2_dataset_path = None\n",
    "if(build_mode == \"annlite\" or build_mode == \"spann\"):\n",
    "    l2_dataset = centroids\n",
    "    l2_dataset_path = cluster_center_path\n",
    "elif(build_mode == \"diskann\" or build_mode == \"aisaq\"):\n",
    "    l2_dataset = dataset\n",
    "    l2_dataset_path = dataset_path\n",
    "l2_graph = Graph(l2_dataset.shape[0], l2_graph_R)\n",
    "\n",
    "# 2. Compute the ground truth for the L2 layer\n",
    "if evaluation and (build_mode == \"annlite\" or build_mode == \"spann\"):\n",
    "    # Do not recreate l2_gt_path if it already exists\n",
    "    if not os.path.exists(l2_gt_path):\n",
    "        compute_gt_multithread(l2_dataset, queries, l2_gt_path, gt_k)\n",
    "\n",
    "# 3. Build the graph and load its structure\n",
    "l2_average_neibors = 0.0\n",
    "if graph_type == \"nsg\":\n",
    "    # Create an NSG index\n",
    "    NSG_index = faiss.IndexNSGFlat(dataset.shape[1], l2_graph_R)  # NSG index with degree R\n",
    "    # Adding the vectors builds the graph automatically\n",
    "    NSG_index.add(l2_dataset)\n",
    "    # Write to a file\n",
    "    temp_graph_path = \"nsg_index.faiss\" # temporary file, only used to convert into our graph structure\n",
    "    faiss.write_index(NSG_index, temp_graph_path)\n",
    "    # Read the graph back\n",
    "    l2_average_neibors = l2_graph.load_from_faiss(temp_graph_path)\n",
    "elif graph_type == \"vamana\":\n",
    "    if not os.path.exists(diskann_output_path):\n",
    "        os.makedirs(diskann_output_path)\n",
    "    # Compute diskann_search_dram_budget\n",
    "    diskann_search_dram_budget = ((pq_bucket+0.5) * l2_dataset.shape[0])/(1024**3)\n",
    "    diskann_search_dram_budget = round(diskann_search_dram_budget,6)\n",
    "    # Formula from the DiskANN documentation:\n",
    "    # diskann_search_dram_budget = (l2_dataset.shape[0] * pq_bucket) / 2**30  + (10000*(4*l2_graph_R + np.dtype(type).itemsize*l2_dataset.shape[1])) / 2**30\n",
    "    # Assemble the DiskANN index-build command\n",
    "    diskann_build_cmd  = diskann_executor_path\n",
    "    diskann_build_cmd += \" --data_type\"\n",
    "    if(l2_dataset.dtype == np.float32):\n",
    "        diskann_build_cmd += \" float\"\n",
    "    elif(l2_dataset.dtype == np.uint8):\n",
    "        diskann_build_cmd += \" uint8\"\n",
    "    else:\n",
    "        # Fixed: input() only accepts a single prompt argument, so the old\n",
    "        # two-argument call raised a TypeError; raise an explicit error instead.\n",
    "        raise TypeError(f\"Unsupported data type! {l2_dataset.dtype}\")\n",
    "    diskann_build_cmd += \" --dist_fn l2\" # other distance metrics are not handled yet\n",
    "    diskann_build_cmd += \" --data_path \" + l2_dataset_path\n",
    "    diskann_build_cmd += \" --index_path_prefix \" + diskann_output_path\n",
    "    diskann_build_cmd += \" -R %d\"%l2_graph_R\n",
    "    diskann_build_cmd += \" -L%d\"%diskann_build_L\n",
    "    diskann_build_cmd += \" -B %f\"%diskann_search_dram_budget\n",
    "    diskann_build_cmd += \" -M %d\"%diskann_build_dram_limit\n",
    "\n",
    "    # Run the build command\n",
    "    print(\"[ANNLite Builder]Build DiskANN by cmd: \",diskann_build_cmd)\n",
    "    os.system(diskann_build_cmd)\n",
    "    print(\"[ANNLite Builder]Build DiskANN Done\")\n",
    "\n",
    "    # Read the graph back\n",
    "    l2_average_neibors = l2_graph.load_from_diskann(diskann_output_path+\"_disk.index\", l2_dataset.dtype)\n",
    "else:\n",
    "    input(\"Graph type not supported.\")\n",
    "\n",
    "# 4. Attach vectors to the graph nodes according to the node type\n",
    "node_type = l2_graph_node_type\n",
    "# node_type = 0\n",
    "graph_vector = None\n",
    "if node_type == 0:\n",
    "    graph_vector = None\n",
    "elif node_type == 1:\n",
    "    graph_vector = l2_dataset # like DiskANN\n",
    "elif node_type == 2:\n",
    "    if(graph_type == \"vamana\"):\n",
    "        pq_codes = read_bin(diskann_output_path+\"_pq_compressed.bin\", np.uint8)\n",
    "    graph_vector = pq_codes\n",
    "elif node_type == 3 or node_type == 4:\n",
    "    print(\"Building AiSAQ...\")\n",
    "    if(graph_type == \"vamana\"):\n",
    "        pq_codes = read_bin(diskann_output_path+\"_pq_compressed.bin\", np.uint8)\n",
    "    graph_vector = l2_graph.build_AiSAQ(pq_codes) # like AiSAQ\n",
    "    print(\"Building AiSAQ Done.\")\n",
    "l2_graph.update_node_type(graph_vector, node_type, l2_dataset) # attach the vectors so they get written with the graph\n",
    "\n",
    "# l2_graph.print()\n",
    "\n",
    "# Write the graph to a file\n",
    "# l2_graph.save_as_bin(l2_graph_root_path+f\".type{node_type}\", alignment_size)\n",
    "l2_graph.save_as_bin(l2_graph_root_path, alignment_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Helper functions for reading the PQ tables produced by DiskANN\n",
    "def read_pq_pivots(pivots_file):\n",
    "    \"\"\"Read the centroid data from a DiskANN _pq_pivots.bin file.\n",
    "\n",
    "    The file starts with a small metadata table (entry count, dim, then uint64\n",
    "    offsets); each offset points at one section: the full PQ centroid table,\n",
    "    the global centroid, and the chunk (sub-space) offsets.\n",
    "\n",
    "    Returns:\n",
    "        (diskann_centroids, global_centroid, chunk_offsets)\n",
    "    \"\"\"\n",
    "    with open(pivots_file, 'rb') as f:\n",
    "        # Read the metadata entry count and dimensionality\n",
    "        metadata_num = np.fromfile(f, dtype=np.uint32, count=1)[0]  # entry count\n",
    "        metadata_dim = np.fromfile(f, dtype=np.uint32, count=1)[0]  # dimensionality\n",
    "        assert metadata_dim == 1, f\"元数据维度应为1，但实际为{metadata_dim}\"\n",
    "        \n",
    "        # Read the metadata offsets (read first, then use)\n",
    "        metadata_offset = np.fromfile(f, dtype=np.uint64, count=metadata_num)\n",
    "        \n",
    "        # Read the full pivot data (the PQ centroid table)\n",
    "        f.seek(metadata_offset[0])\n",
    "        num_centroids = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        dim_per_centroid = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        diskann_centroids = np.fromfile(f, dtype=np.float32, count=num_centroids * dim_per_centroid)\n",
    "        diskann_centroids = diskann_centroids.reshape(num_centroids, dim_per_centroid)\n",
    "        \n",
    "        # Read the global centroid vector\n",
    "        f.seek(metadata_offset[1])\n",
    "        num_global_centroid = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        dim_per_global_centroid = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        global_centroid = np.fromfile(f, dtype=np.float32, count=num_global_centroid*dim_per_global_centroid)\n",
    "        # NOTE(review): reshaping to (-1, num_global_centroid) looks odd;\n",
    "        # (num_global_centroid, dim_per_global_centroid) would be expected -- confirm\n",
    "        global_centroid = global_centroid.reshape(-1, num_global_centroid)\n",
    "        \n",
    "        # Read the chunk offsets (sub-space partition boundaries)\n",
    "        f.seek(metadata_offset[2])\n",
    "        num_chunk_offsets = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        dim_chunk_offsets = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        chunk_offsets = np.fromfile(f, dtype=np.uint32, count=num_chunk_offsets*dim_chunk_offsets)\n",
    "    \n",
    "    return diskann_centroids, global_centroid, chunk_offsets\n",
    "\n",
    "# DiskANN's PQ pivot layout stores, per row, one full-dimension vector made of the\n",
    "# centroids at the same position; ANNLite stores, per row, all centroids of one sub-space.\n",
    "# pq_centroids: DiskANN layout, shape (num_centroids, original_dim); num_centroids is usually 256 (2^8).\n",
    "def convert_pq_centroids(pq_centroids, pq_bucket):\n",
    "    num_centroids = pq_centroids.shape[0]\n",
    "    pq_bucket_size = pq_centroids.shape[1] // pq_bucket # sub-space width; like the original loop, a non-divisible remainder is dropped\n",
    "    # Vectorized re-layout instead of the O(pq_bucket * num_centroids) slice-copy loop:\n",
    "    # (num_centroids, pq_bucket, chunk) -> (pq_bucket, num_centroids, chunk) -> (pq_bucket, num_centroids*chunk)\n",
    "    new_pq_centroids = (\n",
    "        pq_centroids[:, :pq_bucket * pq_bucket_size]\n",
    "        .reshape(num_centroids, pq_bucket, pq_bucket_size)\n",
    "        .transpose(1, 0, 2)\n",
    "        .reshape(pq_bucket, num_centroids * pq_bucket_size)\n",
    "        .astype(np.float32, copy=False) # original allocated a float32 output buffer\n",
    "    )\n",
    "    return new_pq_centroids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [],
   "source": [
    "# For vamana graphs the PQ vectors only become available after the graph is built\n",
    "if(graph_type == \"vamana\"):\n",
    "    # Read the PQ-compressed vectors\n",
    "    pq_codes = read_bin(diskann_output_path+\"_pq_compressed.bin\", np.uint8)\n",
    "    # Write them out. This is essentially a copy, but reading and re-writing keeps\n",
    "    # the pipeline uniform, stays flexible, and lets us sanity-check the data.\n",
    "    write_bin(pq_vector_path, pq_codes)\n",
    "    print(\"write pq codes done:\",pq_vector_path)\n",
    "\n",
    "    # Read the PQ table\n",
    "    diskann_centroids, global_centroid, chunk_offsets = read_pq_pivots(diskann_output_path+\"_pq_pivots.bin\")\n",
    "    \n",
    "    # Convert to the ANNLite layout\n",
    "    new_centroids = convert_pq_centroids(diskann_centroids, pq_bucket)\n",
    "    write_bin(pq_table_path, new_centroids)\n",
    "    print(\"write pq table done:\",pq_table_path)\n",
    "\n",
    "    # Write the global centroid\n",
    "    write_bin(pq_centorid_path, global_centroid)\n",
    "    print(\"write pq centroid done:\",pq_centorid_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {},
   "outputs": [],
   "source": [
    "# np.set_printoptions(threshold=10000) # print in full when the element count is <= the threshold\n",
    "# print(l2_graph.graph)\n",
    "# print(l2_graph.graph[0])\n",
    "# print(l2_graph.graph[0].tobytes()[:4])\n",
    "# print(int.from_bytes(l2_graph.graph[0].tobytes()[:4],byteorder='little'))\n",
    "# print(pq_codes[81])\n",
    "# print(centroids[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "l1 simple node count: 89\n",
      "l1_id_list len: 1634 True simple ratio: 0.102125\n",
      "l1_graph_node_id.shape: (1634,)\n",
      "l1_graph_node.shape: (1634, 128)\n",
      "Computing ground truth on dataset including 1634 vectors for 10000 queries and 100 ground truths per query\n",
      "Computed ground truth for 1000 queries\n",
      "Computed ground truth for 2000 queries\n",
      "Computed ground truth for 3000 queries\n",
      "Computed ground truth for 4000 queries\n",
      "Computed ground truth for 5000 queries\n",
      "Computed ground truth for 6000 queries\n",
      "Computed ground truth for 7000 queries\n",
      "Computed ground truth for 8000 queries\n",
      "Computed ground truth for 9000 queries\n",
      "Computed ground truth for 10000 queries\n",
      "Ground truth file saved to bigann_1M_annlite_c16000_re2_pq64_R30/L1_graph_gt.bin\n",
      "Time taken: 4.766779899597168 seconds\n",
      "Build NSG graph...\n",
      "total nodes: 1634, average neighbors: 13.173806609547123\n",
      "Update graph vector shape: (1634, 64) in type uint8\n",
      "Updated graph node size(B): 184, neighbor_table_size(B): 120, vector_size(B): 64\n",
      "Alignment size: 4096, self.node_size: 184, node per page: 22\n",
      "Graph saved to bigann_1M_annlite_c16000_re2_pq64_R30/L1_graph.bin\n"
     ]
    }
   ],
   "source": [
    "# L1建图\n",
    "# l1_graph_sample_ratio = 0.1 #for debug\n",
    "# l1_ncs_threshold = 1 #for debug\n",
    "# l1_graph_R = 30\n",
    "# graph_type = \"nsg\"\n",
    "\n",
    "#采样部分=========================================================================================\n",
    "l1_graph_node_count = 0\n",
    "np.random.seed(cluster_count) #设置一个伪随机的种子，让聚类数量不变时采样结果保持不变，便于观察性能\n",
    "if(build_mode == \"annlite\"):\n",
    "    l1_graph_node_count = int(l2_graph.n * l1_graph_sample_ratio) #l1层应该包含的点数\n",
    "    l1_graph_node_id = None #存储采样的点的id\n",
    "    if(l1_simpling_method == \"random\"):\n",
    "        # 对L1建图.先从L2的点中采样，然后用采样的点来构建L1图\n",
    "        l1_graph_node_id = np.random.choice(l2_graph.n, l1_graph_node_count, replace=False) #采样的点在L2中对应的id\n",
    "        l1_cached_list = np.zeros((1,1), dtype=np.int32) #随机模式下几乎不会有已经完全缓存了的\n",
    "        write_bin(l1_cachelist_path, l1_cached_list)\n",
    "    elif(l1_simpling_method == \"ncs\"):\n",
    "        #计算采样多少个点. 点集p表示抽样的点、pn表示抽样的点的邻居点集\n",
    "        l1_p_count = int(l1_graph_node_count/l2_average_neibors) #这只是一个初始值，肯定是填不满l1_graph_node_count的\n",
    "        #先把抽样点取出来\n",
    "        l1_p_id = np.random.choice(l2_graph.n, l1_p_count, replace=False) #采样的点在L2中对应的id\n",
    "        l1_p_id_set = list(int(x) for x in l1_p_id)\n",
    "        print(\"l1 simple node count:\", len(l1_p_id_set))\n",
    "        #把抽样点放进l1点的集合中\n",
    "        l1_id_set = set(l1_p_id_set)\n",
    "        #遍历抽样点，把每个点在l2的邻居全部加入集合中\n",
    "        for p_id in l1_p_id:\n",
    "            pn_ids = l2_graph.graph[p_id]\n",
    "            for pn_id in pn_ids:\n",
    "                l1_id_set.add(int(pn_id))\n",
    "        \n",
    "        #进行补充\n",
    "        while(len(l1_id_set) < l1_graph_node_count):\n",
    "            #取一个点出来\n",
    "            next_node_id = int(np.random.choice(l2_graph.n, 1)[0])\n",
    "            #判断这个点是否在l1_p_id_set中.注意这里不是判断是否在最终集合中，因为只要不是在p点集中都可以增加采样数\n",
    "            # print(\"next_node_id:\",next_node_id)\n",
    "            if next_node_id in l1_p_id_set:\n",
    "                continue\n",
    "            \n",
    "            #把这个点加入l1_p_id_set\n",
    "            l1_id_set.add(next_node_id)\n",
    "            #把这个点对应的邻居加入l1_id_set\n",
    "            pn_ids = l2_graph.graph[next_node_id]\n",
    "            for pn_id in pn_ids:\n",
    "                l1_id_set.add(int(pn_id))\n",
    "\n",
    "        try:\n",
    "            l1_id_set.remove(-1)\n",
    "        except:\n",
    "            pass\n",
    "\n",
    "        #采样结果\n",
    "        l1_id_list = list(l1_id_set) #转换为list便于处理\n",
    "        print(\"l1_id_list len:\",len(l1_id_list), \"True simple ratio:\", len(l1_id_list)/l2_graph.n)\n",
    "\n",
    "        #采样结束，记录哪些点已经是被完全缓存了的\n",
    "        l1_cached_list = []\n",
    "        for i in range(len(l1_id_list)):\n",
    "            node_id = l1_id_list[i] \n",
    "            node_neighbor_ids = l2_graph.graph[node_id]\n",
    "            #计算这个点有多少个邻居，用于判断是否邻居已经全部缓存\n",
    "            node_neighbor_ids_list = set(int(x) for x in node_neighbor_ids)\n",
    "            try:\n",
    "                node_neighbor_ids_list.remove(-1)\n",
    "            except:\n",
    "                pass\n",
    "            node_neighbor_ids_list = list(node_neighbor_ids_list)\n",
    "            neighbor_count = len(node_neighbor_ids_list)\n",
    "            # print(f\"node {node_id} neighbor count: {neighbor_count}\")\n",
    "            # print(\"node_neighbor_ids_list:\",node_neighbor_ids_list)\n",
    "            #计算邻居有多少点被缓存了\n",
    "            cached_neighbor_count = 0\n",
    "            for node_neighbor_id in node_neighbor_ids_list:\n",
    "                if(node_neighbor_id in l1_id_set):\n",
    "                    cached_neighbor_count += 1\n",
    "            if cached_neighbor_count >= neighbor_count * l1_ncs_threshold: #只要缓存了多少的比例，就认为这个点已经被完全缓存了\n",
    "                l1_cached_list.append(node_id)\n",
    "\n",
    "        l1_cached_list = np.array(l1_cached_list).astype(np.uint32).reshape(-1,1)\n",
    "        write_bin(l1_cachelist_path, l1_cached_list)\n",
    "        #将l1_id_list转为numpy数组\n",
    "        l1_graph_node_id = np.array(l1_id_list)\n",
    "        print(\"l1_graph_node_id.shape:\", l1_graph_node_id.shape)\n",
    "\n",
    "    else:\n",
    "        input(\"l1_simpling_method error!!\")\n",
    "\n",
    "    #修正l1_graph_node_count的值，因为ncs采样后的节点数量会变化\n",
    "    l1_graph_node_count = l1_graph_node_id.shape[0]\n",
    "    l1_graph_node = np.zeros((l1_graph_node_count, dataset.shape[1]), dtype=l2_dataset.dtype) #采样的点的向量\n",
    "    l1_graph_node_pq = np.zeros((l1_graph_node_count, pq_codes.shape[1]), dtype=pq_codes.dtype) #这个用来存储pq向量，不能用这个直接建图\n",
    "    #将这些点对应的向量写入l1_graph_node\n",
    "    for i in range(l1_graph_node_count):\n",
    "        l1_graph_node[i] = l2_dataset[l1_graph_node_id[i]]\n",
    "        if(l1_graph_node_type == 2):\n",
    "            l1_graph_node_pq[i] = pq_codes[l1_graph_node_id[i]] # 这里可以选择是否使用PQ编码的向量\n",
    "    #写入文件.正常是附加在图中的，不用写入\n",
    "    # write_bin(l1_graph_vector_path, l1_graph_node)\n",
    "\n",
    "    print(\"l1_graph_node.shape:\", l1_graph_node.shape)\n",
    "    # 计算l1层的gt\n",
    "    if evaluation:\n",
    "        compute_gt_multithread(l1_graph_node, queries, l1_gt_path, gt_k)\n",
    "\n",
    "    #建图部分=========================================================================================\n",
    "    #对l1_graph_node建图\n",
    "    l1_graph = Graph(l1_graph_node.shape[0], l1_graph_R)\n",
    "    if graph_type == \"nsg\":\n",
    "        print(\"Build NSG graph...\")\n",
    "        l1_NSG_index = faiss.IndexNSGFlat(l1_graph_node.shape[1], l1_graph_R)  # 创建NSG索引，度为R \n",
    "        l1_NSG_index.add(l1_graph_node)\n",
    "        # 写入到文件\n",
    "        temp_graph_path = \"l1_nsg_index.faiss\" #临时文件，只是用于转换为图结构\n",
    "        faiss.write_index(l1_NSG_index, temp_graph_path)\n",
    "        # 从文件中读取图\n",
    "        l1_graph.load_from_faiss(temp_graph_path)\n",
    "    elif graph_type == \"vamana\":\n",
    "        print(\"Build Vamana graph...\")\n",
    "\n",
    "        #先把l1_graph_node写入文件用于建图\n",
    "        l1_raw_vectors_path = \"l1_raw_vectors_temp.bin\"\n",
    "        write_bin(l1_raw_vectors_path, l1_graph_node)\n",
    "\n",
    "        l1_diskann_output_path = diskann_output_path+\"_l1/\"\n",
    "        if not os.path.exists(l1_diskann_output_path):\n",
    "            os.makedirs(l1_diskann_output_path)\n",
    "        #计算diskann_search_dram_budget,L1建图无限制\n",
    "        diskann_search_dram_budget = 100\n",
    "        #构建DiskANN索引构建指令\n",
    "        diskann_build_cmd  = diskann_executor_path\n",
    "        diskann_build_cmd += \" --data_type\"\n",
    "        if(l1_graph_node.dtype == np.float32):\n",
    "            diskann_build_cmd += \" float\"\n",
    "        elif(l1_graph_node.dtype == np.uint8):\n",
    "            diskann_build_cmd += \" uint8\"\n",
    "        else:\n",
    "            print(\"Unsupported data type!\",l1_graph_node.dtype)\n",
    "            pause()\n",
    "        diskann_build_cmd += \" --dist_fn l2\" #暂没考虑其他类型的距离范式\n",
    "        diskann_build_cmd += \" --data_path \" + l1_raw_vectors_path\n",
    "        diskann_build_cmd += \" --index_path_prefix \" + l1_diskann_output_path\n",
    "        diskann_build_cmd += \" -R %d\"%l1_graph_R\n",
    "        diskann_build_cmd += \" -L%d\"%diskann_build_L\n",
    "        diskann_build_cmd += \" -B %f\"%diskann_search_dram_budget\n",
    "        diskann_build_cmd += \" -M %d\"%diskann_build_dram_limit\n",
    "\n",
    "        #执行建图指令\n",
    "        print(\"[ANNLite Builder]Build DiskANN by cmd: \",diskann_build_cmd)\n",
    "        os.system(diskann_build_cmd)\n",
    "        print(\"[ANNLite Builder]Build DiskANN Done\")\n",
    "\n",
    "        #读图\n",
    "        l1_graph.load_from_diskann(l1_diskann_output_path+\"_disk.index\", l1_graph_node.dtype)\n",
    "    else:\n",
    "        input(\"Graph type not supported.\")\n",
    "\n",
    "    #写入l1图，因为l1图是要常驻内存的所以直接把向量接在后面即可\n",
    "    if(l1_graph_node_type==1):\n",
    "        l1_graph.update_node_type(l1_graph_node, l1_graph_node_type) #在图中附加向量，用于写入图\n",
    "    elif(l1_graph_node_type==2):\n",
    "        l1_graph.update_node_type(l1_graph_node_pq, l1_graph_node_type) #在图中附加PQ向量，用于写入图\n",
    "    l1_graph.save_as_bin(l1_graph_path, alignment_size)\n",
    "\n",
    "    #l1_graph_node_id转化为int32，并且reshape.\n",
    "    #这个相当于L1和L2之间的映射关系\n",
    "    l1_graph_node_id = l1_graph_node_id.astype(np.int32).reshape(-1,1)\n",
    "    write_bin(l1_l2_graph_node_mapping_path, l1_graph_node_id)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Write the configuration parameters as JSON\n",
    "import json\n",
    "\n",
    "# Dataset-related settings are meant to live in the user config file now.\n",
    "# NOTE(review): the dict below still contains dataset_path/query_path/gt_path --\n",
    "# confirm whether they should be dropped to match the comment above.\n",
    "config = {\n",
    "    \"index_mode\": build_mode_index,\n",
    "    \"dataset_path\": dataset_path,\n",
    "    \"query_path\": query_path,\n",
    "    \"gt_path\": gt_path,\n",
    "    \"build_mode\":build_mode,\n",
    "    \"graph_type\":graph_type,\n",
    "    \"cluster_center_path\": cluster_center_path.replace(output_folder, \"\"),\n",
    "    \"offset_list_path\": offset_list_path.replace(output_folder, \"\"),\n",
    "    \"last_layer_path\": last_layer_path.replace(output_folder, \"\"),\n",
    "    \"pq_vector_path\": pq_vector_path.replace(output_folder, \"\"),\n",
    "    \"pq_table_path\": pq_table_path.replace(output_folder, \"\"),\n",
    "    \"pq_centorid_path\": pq_centorid_path.replace(output_folder, \"\"),\n",
    "    \"l2_graph_root_path\": l2_graph_root_path.replace(output_folder, \"\"),\n",
    "    \"cluster_count\": cluster_count,\n",
    "    \"train_ratio\": train_ratio,\n",
    "    \"k\": k,\n",
    "    \"nprobe\": nprobe,\n",
    "    \"l2_graph_R\": l2_graph_R,\n",
    "    \"vector_count\": dataset.shape[0],\n",
    "    \"dim\": dataset.shape[1],\n",
    "    \"feature_type\": str(dataset.dtype),\n",
    "    \"pq_bucket\": pq_bucket,\n",
    "    \"pq_bit\": pq_bit,\n",
    "    \"alignment_size\": alignment_size,\n",
    "    \"l1_graph_path\": l1_graph_path.replace(output_folder, \"\"),\n",
    "    \"l1_l2_graph_node_mapping_path\": l1_l2_graph_node_mapping_path.replace(output_folder, \"\"),\n",
    "    \"l1_graph_R\": l1_graph_R,\n",
    "    \"l1_graph_node_count\": l1_graph_node_count,\n",
    "    \"l1_graph_node_type\": l1_graph_node_type,\n",
    "    \"l2_graph_node_type\": l2_graph_node_type,\n",
    "    \"l1_gt_path\":  l1_gt_path.replace(output_folder, \"\"),\n",
    "    \"l2_gt_path\": l2_gt_path.replace(output_folder, \"\"),\n",
    "    \"l1_cachelist_path\": l1_cachelist_path.replace(output_folder, \"\"),\n",
    "    \"l2_average_neibors\": l2_average_neibors\n",
    "}\n",
    "\n",
    "with open(config_path, \"w\") as f:\n",
    "    json.dump(config, f, indent=4)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Delete any file starting with \"buildtime\" in the output folder\n",
    "import os\n",
    "for fname in os.listdir(output_folder):\n",
    "    if fname.startswith(\"buildtime\"):\n",
    "        os.remove(output_folder+fname)\n",
    "# Create an empty file named after the current date/time to record when this index was built\n",
    "import datetime\n",
    "now = datetime.datetime.now()\n",
    "# Context manager instead of the bare open()/close() pair so the handle is always released.\n",
    "# NOTE(review): ':' in the timestamp makes the filename invalid on Windows -- confirm this runs on Linux only\n",
    "with open(output_folder+\"buildtime-\"+now.strftime(\"%Y-%m-%d-%H:%M\"), 'w'):\n",
    "    pass"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'pause' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mNameError\u001b[39m                                 Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[106]\u001b[39m\u001b[32m, line 2\u001b[39m\n\u001b[32m      1\u001b[39m \u001b[38;5;28minput\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mBuild Finish! Output to \u001b[39m\u001b[38;5;132;01m{\u001b[39;00moutput_folder\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m) \u001b[38;5;66;03m#等待用户输入\u001b[39;00m\n\u001b[32m----> \u001b[39m\u001b[32m2\u001b[39m \u001b[43mpause\u001b[49m() \u001b[38;5;66;03m#用于执行到这里就停止\u001b[39;00m\n",
      "\u001b[31mNameError\u001b[39m: name 'pause' is not defined"
     ]
    }
   ],
   "source": [
    "input(f\"Build Finish! Output to {output_folder}\") # wait for user confirmation\n",
    "# Intentional stop so Run-All does not fall through into the experimental cells below.\n",
    "# Fixed: the old pause() call was undefined and only halted via a NameError.\n",
    "raise RuntimeError(\"Intentional stop: cells below are experimental\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "''"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Everything below this cell is experimental code\n",
    "input()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PQ_index train done\n",
      "PQ_index add done\n",
      "pq_codes shape: (1000, 128)\n",
      "pq_codes: [[40.83611    4.3014355  0.9839076 ... 16.905008   7.8478904  9.4247875]\n",
      " [30.530243  17.645502   8.210951  ... 25.815485  45.362263  22.928146 ]\n",
      " [ 4.0218444  2.4558568  3.0369928 ...  4.5160866  5.3875093  9.401179 ]\n",
      " ...\n",
      " [64.0495    22.337753   5.728892  ... 15.509003  21.151676  28.469551 ]\n",
      " [ 9.651926   9.384191   7.501774  ... 73.22085   27.381193  11.742478 ]\n",
      " [30.565502  14.020632   9.882086  ... 22.976032  35.265907  46.667374 ]]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n",
      "WARNING clustering 1000 points to 256 centroids: please provide at least 9984 training points\n"
     ]
    }
   ],
   "source": [
    "# Build the PQ representation of the centroids.\n",
    "pq_m = 16 # number of segments each vector is split into\n",
    "pq_bit = 8 # bits used to quantize each segment: each sub-vector is clustered and\n",
    "           # the cluster id is stored in pq_bit bits, so there are 2**pq_bit clusters per segment\n",
    "PQ_index = faiss.IndexPQ(centroids.shape[1], pq_m, pq_bit)\n",
    "PQ_index.train(centroids)\n",
    "print(\"PQ_index train done\")\n",
    "PQ_index.add(centroids)\n",
    "print(\"PQ_index add done\")\n",
    "\n",
    "pq_codes = PQ_index.reconstruct_n(0, PQ_index.ntotal) # decode PQ codes back to approximate vectors (sanity check only)\n",
    "print(\"pq_codes shape:\", pq_codes.shape)\n",
    "print(\"pq_codes:\", pq_codes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PQ 压缩向量 pq_codes（NumPy 数组形式）：\n",
      "(16000,) uint8\n",
      "簇心信息 out_centroids（NumPy 数组形式）：\n",
      "(32768,) float32\n",
      "pq_codes reshape of [0]:\n",
      " [[ 26 244 156 ... 125  89  68]]\n",
      "bucket_size: 2048 pq_vector_dim: 8\n",
      "208\n",
      "[40.83611    4.3014355  0.9839076 ... 17.04902   44.83664   65.43688  ]\n",
      "4000\n",
      "[15.559765   3.9408097  2.1228154 ... 99.64698   91.15298   36.7002   ]\n",
      "5344\n",
      "[34.535786   8.542513   5.2833347 ... 41.620205  56.666187  56.96575  ]\n",
      "8184\n",
      "[34.31311  14.114498 10.960928 ... 10.416082  8.652399 20.477543]\n",
      "9064\n",
      "[16.590559  9.966529 20.705217 ... 19.870474 32.18851  34.643963]\n",
      "10528\n",
      "[11.638332  3.881784  7.496114 ... 75.50459  96.44007  31.44656 ]\n",
      "12600\n",
      "[106.090965  35.523525   8.385369 ...  20.715433  28.34224   88.63263 ]\n",
      "15680\n",
      "[79.6228   25.60147  16.781967 ...  7.833055  6.314881 29.36789 ]\n",
      "18104\n",
      "[ 9.70793   11.019244  43.36844   ...  8.2998085  5.3449554  6.532251 ]\n",
      "18576\n",
      "[11.952099  21.822672  92.14529   ... 11.075042   5.835611   5.7782974]\n",
      "21048\n",
      "[109.0588    100.31952    67.42155   ...   1.9137354   3.4913347\n",
      "  27.209799 ]\n",
      "24448\n",
      "[76.070526 31.861273 13.616473 ... 13.312683  8.654302 23.233685]\n",
      "25576\n",
      "[12.812498  10.459351  19.214544  ...  7.1236076  5.5772943  9.875125 ]\n",
      "27624\n",
      "[17.113607  28.982136  76.3429    ...  5.646146   5.9851937  9.611012 ]\n",
      "29384\n",
      "[28.651428  69.16989   79.73816   ...  7.9700756  6.5217376  8.631322 ]\n",
      "31264\n",
      "[21.276329  15.7910595  8.299057  ... 16.905008   7.8478904  9.4247875]\n",
      "pq_codes_recons2 of [0]:\n",
      " [40.83611    4.3014355  0.9839076 ... 16.905008   7.8478904  9.4247875]\n",
      "pq_codes_recons of [0]:\n",
      " [[40.83611    4.3014355  0.9839076 ... 16.905008   7.8478904  9.4247875]]\n",
      "diff:\n",
      " [[0. 0. 0. ... 0. 0. 0.]]\n"
     ]
    }
   ],
   "source": [
    "# Extract the PQ codes as a NumPy array.\n",
    "# faiss packs the per-segment codes into uint8, so the total length is\n",
    "# num_centroids * pq_m * pq_bit / 8 bytes.\n",
    "pq_codes = faiss.vector_to_array(PQ_index.codes)\n",
    "print(\"PQ 压缩向量 pq_codes（NumPy 数组形式）：\")\n",
    "print(pq_codes.shape, pq_codes.dtype)\n",
    "\n",
    "# Extract the centroid table. Its size is independent of the number of vectors\n",
    "# and grows exponentially with pq_bit:\n",
    "# size = pq_m * (2**pq_bit * dim/pq_m) = dim * 2**pq_bit floats.\n",
    "pq = PQ_index.pq\n",
    "out_centroids = faiss.vector_to_array(pq.centroids)\n",
    "print(\"簇心信息 out_centroids（NumPy 数组形式）：\")\n",
    "print(out_centroids.shape, out_centroids.dtype)\n",
    "\n",
    "# Reshape pq_codes so each row holds the pq_m sub-codes of one vector; keep row 0 only.\n",
    "pq_codes = pq_codes.reshape(-1, pq_m)[:1]\n",
    "print(\"pq_codes reshape of [0]:\\n\",pq_codes)\n",
    "\n",
    "# Reconstruct vector 0 with faiss for comparison.\n",
    "pq_codes_recons = PQ_index.reconstruct_n(0, 1)\n",
    "\n",
    "def reconstruct_pq_vector(pq_codes, out_centroids, pq_m, pq_bit, dim):\n",
    "    \"\"\"Manually decode one PQ code row into an approximate vector.\n",
    "\n",
    "    pq_codes: 1-D array of pq_m uint8 sub-codes for a single vector.\n",
    "    out_centroids: flat centroid table from faiss (bucket-major layout).\n",
    "    pq_m / pq_bit: PQ parameters used when training.\n",
    "    dim: dimensionality of the original vector.\n",
    "    \"\"\"\n",
    "    # NOTE(review): `type` is a module-level variable holding the dataset dtype\n",
    "    # (it shadows the builtin) — confirm it is set before this cell runs.\n",
    "    reconstructed_vector = np.zeros(dim, dtype=type)\n",
    "    bucket_vector_count = 2 ** pq_bit # sub-centroids per bucket (256 for 8 bits)\n",
    "    pq_vector_dim = dim // pq_m # dimensionality of each sub-vector\n",
    "    bucket_size = bucket_vector_count * pq_vector_dim # floats occupied by one bucket\n",
    "    print(\"bucket_size:\",bucket_size, \"pq_vector_dim:\",pq_vector_dim)\n",
    "    # Decode each of the pq_m buckets.\n",
    "    for i in range(pq_m):\n",
    "        bucket_index_offset = i * bucket_size # start of this bucket in the flat table\n",
    "        vector_offset_in_bucket = pq_codes[i] * pq_vector_dim # sub-centroid offset inside the bucket\n",
    "        st_offset = bucket_index_offset+vector_offset_in_bucket\n",
    "        ed_offset = bucket_index_offset+vector_offset_in_bucket+pq_vector_dim\n",
    "        print(bucket_index_offset+vector_offset_in_bucket)\n",
    "        bucket_centroid = out_centroids[st_offset : ed_offset]\n",
    "        print(bucket_centroid)\n",
    "        reconstructed_vector[i*pq_vector_dim:(i+1)*pq_vector_dim] = bucket_centroid\n",
    "    return reconstructed_vector\n",
    "\n",
    "# BUGFIX: the call previously passed 6 arguments (including centroids.shape[0])\n",
    "# to a 5-parameter function, which raised a TypeError.\n",
    "pq_codes_recons2 = reconstruct_pq_vector(pq_codes[0], out_centroids, pq_m, pq_bit, centroids.shape[1])\n",
    "print(\"pq_codes_recons2 of [0]:\\n\",pq_codes_recons2)\n",
    "\n",
    "print(\"pq_codes_recons of [0]:\\n\",pq_codes_recons)\n",
    "\n",
    "# Difference between faiss's reconstruction and the manual one (expected all zeros).\n",
    "diff = pq_codes_recons - pq_codes_recons2\n",
    "print(\"diff:\\n\",diff)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(16,)\n"
     ]
    }
   ],
   "source": [
    "print(pq_codes[0].shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "NSG搜索结果：\n",
      "距离: [[14.638881 14.860883 14.98222  15.652529]]\n",
      "索引: [[2027 2964  512 2021]]\n"
     ]
    }
   ],
   "source": [
    "import faiss\n",
    "import numpy as np\n",
    "\n",
    "# Generate some sample data\n",
    "d = 128  # vector dimensionality\n",
    "n = 3000  # number of vectors\n",
    "xb = np.random.random((n, d)).astype('float32')\n",
    "\n",
    "# Create an NSG index; 30 is the graph degree (old comment wrongly said 16)\n",
    "NSG_index = faiss.IndexNSGFlat(d, 30)  # 30 is the graph degree\n",
    "\n",
    "# Adding vectors to the index builds the graph automatically\n",
    "NSG_index.add(xb)\n",
    "\n",
    "# Search example\n",
    "k = 4  # number of nearest neighbors to return\n",
    "xq = np.random.random((1, d)).astype('float32')\n",
    "distances, indices = NSG_index.search(xq, k)\n",
    "print(\"NSG搜索结果：\")\n",
    "print(\"距离:\", distances)\n",
    "print(\"索引:\", indices)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[0.8831723  0.02065963 0.3575316  ... 0.17816335 0.40687758 0.01584534]\n",
      " [0.6857802  0.2282757  0.13992205 ... 0.58001345 0.19254294 0.6543304 ]\n",
      " [0.17146447 0.73399097 0.6810836  ... 0.5109914  0.60889894 0.95673156]\n",
      " ...\n",
      " [0.3713674  0.23798259 0.9140764  ... 0.655713   0.5609678  0.17581458]\n",
      " [0.01810152 0.13865073 0.94171613 ... 0.5280385  0.30123332 0.7820927 ]\n",
      " [0.89047265 0.90575397 0.54071546 ... 0.05784171 0.3199885  0.37917796]]\n"
     ]
    }
   ],
   "source": [
    "print(xb)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "节点 0 的近邻节点索引: [2307 1759 1515 ... 1155 2739  853]\n",
      "节点 1 的近邻节点索引: [1115  226 1113 ... 2820 2496 2942]\n",
      "节点 2 的近邻节点索引: [ 503  220  205 ... 1932  600 2420]\n",
      "节点 3 的近邻节点索引: [2638 1228 2009 ...  701 1809 1627]\n",
      "节点 4 的近邻节点索引: [1764 2731 2120 ... 1163 1827 1618]\n",
      "节点 5 的近邻节点索引: [1915  370 2664 ...  736   88 2768]\n",
      "节点 6 的近邻节点索引: [ 116 2578 1631 ...  943 1278 1551]\n",
      "节点 7 的近邻节点索引: [2601  733 2210 ...  201 2265 2220]\n",
      "节点 8 的近邻节点索引: [1551  313 2778 ... 1380  421 1090]\n",
      "节点 9 的近邻节点索引: [ 256 2044   87 ... 1041 2778  347]\n",
      "索引已保存到 nsg_index.faiss\n",
      "索引已从文件中读取\n",
      "使用加载的索引进行搜索的结果：\n",
      "距离: [[14.553033 15.232557 15.405725 ... 17.410328 17.421402 17.434025]]\n",
      "索引: [[1226 1943 2849 ... 1655 2549  235]]\n"
     ]
    }
   ],
   "source": [
    "# Search with each vector as the query to inspect its neighbors and infer\n",
    "# how the graph is connected.\n",
    "k = 30  # look at the 30 nearest neighbors of each node (old comment wrongly said 4)\n",
    "for i in range(10):\n",
    "    query_vector = xb[i].reshape(1, -1)\n",
    "    distances, indices = NSG_index.search(query_vector, k)\n",
    "    print(f\"节点 {i} 的近邻节点索引: {indices[0][1:]}\")\n",
    "\n",
    "index_file_path = \"nsg_index.faiss\"\n",
    "faiss.write_index(NSG_index, index_file_path)\n",
    "print(f\"索引已保存到 {index_file_path}\")\n",
    "\n",
    "# Read the index back from the file\n",
    "loaded_index = faiss.read_index(index_file_path)\n",
    "print(\"索引已从文件中读取\")\n",
    "\n",
    "# The loaded index can be used for searching just like the original\n",
    "xq = np.random.random((1, d)).astype('float32')\n",
    "distances, indices = loaded_index.search(xq, k)\n",
    "print(\"使用加载的索引进行搜索的结果：\")\n",
    "print(\"距离:\", distances)\n",
    "print(\"索引:\", indices)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第1行:\n",
      "正在读取文件: nsg_index.faiss\n",
      "目标数组: [2307 1759 1515 ... 1155 2739  853]\n",
      "匹配成功! 偏移量: 83, 匹配的数字: 2307\n",
      "匹配成功! 偏移量: 87, 匹配的数字: 1759\n",
      "匹配成功! 偏移量: 91, 匹配的数字: 1515\n",
      "匹配成功! 偏移量: 95, 匹配的数字: 753\n",
      "匹配成功! 偏移量: 99, 匹配的数字: 2805\n",
      "匹配成功! 偏移量: 103, 匹配的数字: 366\n",
      "匹配成功! 偏移量: 107, 匹配的数字: 2758\n",
      "匹配成功! 偏移量: 111, 匹配的数字: 1812\n",
      "匹配成功! 偏移量: 115, 匹配的数字: 2980\n",
      "匹配成功! 偏移量: 119, 匹配的数字: 2428\n",
      "匹配成功! 偏移量: 123, 匹配的数字: 810\n",
      "匹配成功! 偏移量: 127, 匹配的数字: 1372\n",
      "匹配成功! 偏移量: 131, 匹配的数字: 157\n",
      "匹配成功! 偏移量: 135, 匹配的数字: 1535\n",
      "匹配成功! 偏移量: 139, 匹配的数字: 1881\n",
      "匹配成功! 偏移量: 143, 匹配的数字: 1155\n",
      "第2行:\n",
      "正在读取文件: nsg_index.faiss\n",
      "目标数组: [1115  226 1113 ... 2820 2496 2942]\n",
      "匹配成功! 偏移量: 147, 匹配的数字: 2739\n",
      "匹配成功! 偏移量: 207, 匹配的数字: 1115\n",
      "匹配成功! 偏移量: 211, 匹配的数字: 226\n",
      "匹配成功! 偏移量: 215, 匹配的数字: 1339\n",
      "匹配成功! 偏移量: 219, 匹配的数字: 2811\n",
      "匹配成功! 偏移量: 223, 匹配的数字: 1743\n",
      "匹配成功! 偏移量: 227, 匹配的数字: 2272\n",
      "匹配成功! 偏移量: 231, 匹配的数字: 408\n",
      "匹配成功! 偏移量: 235, 匹配的数字: 2739\n",
      "匹配成功! 偏移量: 239, 匹配的数字: 1373\n",
      "匹配成功! 偏移量: 243, 匹配的数字: 2015\n",
      "匹配成功! 偏移量: 247, 匹配的数字: 1994\n",
      "匹配成功! 偏移量: 251, 匹配的数字: 272\n",
      "匹配成功! 偏移量: 255, 匹配的数字: 2354\n",
      "匹配成功! 偏移量: 259, 匹配的数字: 2820\n",
      "匹配成功! 偏移量: 307, 匹配的数字: 227\n",
      "第3行:\n",
      "正在读取文件: nsg_index.faiss\n",
      "目标数组: [ 503  220  205 ... 1932  600 2420]\n",
      "匹配成功! 偏移量: 331, 匹配的数字: 503\n",
      "匹配成功! 偏移量: 335, 匹配的数字: 220\n",
      "匹配成功! 偏移量: 339, 匹配的数字: 205\n",
      "匹配成功! 偏移量: 343, 匹配的数字: 1552\n",
      "匹配成功! 偏移量: 347, 匹配的数字: 2177\n",
      "匹配成功! 偏移量: 351, 匹配的数字: 187\n",
      "匹配成功! 偏移量: 355, 匹配的数字: 470\n",
      "匹配成功! 偏移量: 359, 匹配的数字: 2800\n",
      "匹配成功! 偏移量: 363, 匹配的数字: 2368\n",
      "匹配成功! 偏移量: 367, 匹配的数字: 1255\n",
      "匹配成功! 偏移量: 371, 匹配的数字: 2421\n",
      "匹配成功! 偏移量: 375, 匹配的数字: 2583\n",
      "匹配成功! 偏移量: 379, 匹配的数字: 313\n",
      "匹配成功! 偏移量: 383, 匹配的数字: 958\n",
      "匹配成功! 偏移量: 387, 匹配的数字: 2556\n",
      "匹配成功! 偏移量: 391, 匹配的数字: 387\n",
      "第4行:\n",
      "正在读取文件: nsg_index.faiss\n",
      "目标数组: [2638 1228 2009 ...  701 1809 1627]\n",
      "匹配成功! 偏移量: 367, 匹配的数字: 1255\n",
      "匹配成功! 偏移量: 455, 匹配的数字: 2638\n",
      "匹配成功! 偏移量: 459, 匹配的数字: 1228\n",
      "匹配成功! 偏移量: 463, 匹配的数字: 2009\n",
      "匹配成功! 偏移量: 467, 匹配的数字: 2738\n",
      "匹配成功! 偏移量: 471, 匹配的数字: 173\n",
      "匹配成功! 偏移量: 475, 匹配的数字: 603\n",
      "匹配成功! 偏移量: 479, 匹配的数字: 2730\n",
      "匹配成功! 偏移量: 483, 匹配的数字: 489\n",
      "匹配成功! 偏移量: 495, 匹配的数字: 956\n",
      "匹配成功! 偏移量: 1079, 匹配的数字: 256\n",
      "匹配成功! 偏移量: 1211, 匹配的数字: 1536\n",
      "匹配成功! 偏移量: 2267, 匹配的数字: 1483\n",
      "匹配成功! 偏移量: 2415, 匹配的数字: 1289\n",
      "匹配成功! 偏移量: 3127, 匹配的数字: 1254\n",
      "匹配成功! 偏移量: 3343, 匹配的数字: 489\n",
      "第5行:\n",
      "正在读取文件: nsg_index.faiss\n",
      "目标数组: [1764 2731 2120 ... 1163 1827 1618]\n",
      "匹配成功! 偏移量: 447, 匹配的数字: 123\n",
      "匹配成功! 偏移量: 503, 匹配的数字: 1764\n",
      "匹配成功! 偏移量: 507, 匹配的数字: 2731\n",
      "匹配成功! 偏移量: 511, 匹配的数字: 2120\n",
      "匹配成功! 偏移量: 515, 匹配的数字: 2902\n",
      "匹配成功! 偏移量: 519, 匹配的数字: 1693\n",
      "匹配成功! 偏移量: 523, 匹配的数字: 575\n",
      "匹配成功! 偏移量: 527, 匹配的数字: 2088\n",
      "匹配成功! 偏移量: 531, 匹配的数字: 1296\n",
      "匹配成功! 偏移量: 535, 匹配的数字: 1604\n",
      "匹配成功! 偏移量: 539, 匹配的数字: 1634\n",
      "匹配成功! 偏移量: 543, 匹配的数字: 2026\n",
      "匹配成功! 偏移量: 547, 匹配的数字: 2072\n",
      "匹配成功! 偏移量: 551, 匹配的数字: 2193\n",
      "匹配成功! 偏移量: 555, 匹配的数字: 471\n",
      "匹配成功! 偏移量: 559, 匹配的数字: 2042\n"
     ]
    }
   ],
   "source": [
    "# Scan a binary file as overlapping little-endian int32 values and print the\n",
    "# byte offsets where a value matches any entry of a target array; stop after\n",
    "# more than k matches. (The earlier commented-out variant was removed.)\n",
    "import struct\n",
    "\n",
    "def read_and_compare(file_path, target_array, k):\n",
    "    \"\"\"Print byte offsets in file_path whose int32 value appears in target_array.\n",
    "\n",
    "    The scan advances one byte at a time (unaligned) so matches are found\n",
    "    regardless of the file's internal record alignment. Always returns -1;\n",
    "    the useful information is what gets printed.\n",
    "    \"\"\"\n",
    "    print(f\"正在读取文件: {file_path}\")\n",
    "    print(f\"目标数组: {target_array}\")\n",
    "    try:\n",
    "        with open(file_path, 'rb') as file:\n",
    "            data = file.read()\n",
    "            count = 0\n",
    "            for offset in range(len(data) - 3):\n",
    "                chunk = data[offset:offset + 4]\n",
    "                num = struct.unpack('<i', chunk)[0]\n",
    "                if num in target_array:\n",
    "                    print(f\"匹配成功! 偏移量: {offset}, 匹配的数字: {num}\")\n",
    "                    count += 1\n",
    "                    if count > k:\n",
    "                        break\n",
    "        return -1\n",
    "    except FileNotFoundError:\n",
    "        print(\"错误: 文件未找到!\")\n",
    "    except Exception as e:\n",
    "        print(f\"错误: 发生了一个未知错误: {e}\")\n",
    "    return -1\n",
    "\n",
    "line_count = 5\n",
    "query_vector = xb[0:line_count].reshape(line_count, -1)\n",
    "distances, indices = NSG_index.search(query_vector, k)\n",
    "\n",
    "for i in range(line_count):\n",
    "    print(f\"第{i+1}行:\")\n",
    "    a = read_and_compare(index_file_path, indices[i][1:], k/2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Graph structure used to hold and later serialize an adjacency list.\n",
    "# The graph is a fixed-width int32 array: one row per node, max_neighbor\n",
    "# columns per row, unused slots filled with -1.\n",
    "class Graph:\n",
    "    def __init__(self, n, max_neighbor, node_type=0):\n",
    "        \"\"\"n: node count; max_neighbor: max out-degree per node;\n",
    "        node_type: node layout tag (defaults to 0 so demo code may omit it).\"\"\"\n",
    "        # Initialize every neighbor slot to -1 (meaning 'empty').\n",
    "        self.graph = np.full((n, max_neighbor), -1, dtype=np.int32)        \n",
    "\n",
    "        self.n = n\n",
    "        self.max_neighbor = max_neighbor\n",
    "        \n",
    "        self.node_type = node_type\n",
    "\n",
    "    def print(self):\n",
    "        print(f'graph shape: {self.graph.shape}')\n",
    "        print(self.graph)\n",
    "\n",
    "    # Load the graph structure from a file written by faiss.\n",
    "    def load_from_faiss(self, path):\n",
    "        # faiss stores the adjacency lists starting at byte 83 as int32 values,\n",
    "        # each node's list terminated by 0xFFFFFFFF.\n",
    "        # NOTE(review): offset 83 was determined empirically for this index\n",
    "        # type — confirm against the faiss serialization format.\n",
    "        total_neighbors_count = 0\n",
    "        total_nodes = 0\n",
    "        with open(path, 'rb') as f:\n",
    "            f.seek(83)\n",
    "            # BUGFIX: iterate over self.n (was the global `n`, which may differ\n",
    "            # from this graph's node count).\n",
    "            for i in range(self.n):\n",
    "                neighbors = []\n",
    "                neighbors_count = 0\n",
    "                while True:\n",
    "                    neighbor = struct.unpack('I', f.read(4))[0]\n",
    "                    if neighbor == 0xFFFFFFFF:\n",
    "                        break\n",
    "                    neighbors.append(neighbor)\n",
    "                    neighbors_count += 1\n",
    "                # Truncate lists longer than max_neighbor instead of failing\n",
    "                # with a broadcast error.\n",
    "                keep = min(neighbors_count, self.max_neighbor)\n",
    "                self.graph[i][:keep] = neighbors[:keep]\n",
    "                total_neighbors_count += neighbors_count\n",
    "                total_nodes += 1\n",
    "        \n",
    "        print(f'total nodes: {total_nodes}, average neighbors: {total_neighbors_count/total_nodes}')\n",
    "\n",
    "# BUGFIX: this call previously failed because node_type had no default value.\n",
    "graph = Graph(xb.shape[0], k)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "total nodes: 3000, average neighbors: 26.162\n",
      "graph shape: (3000, 30)\n",
      "[[2307 1759 1515 ... 2010 1689 2138]\n",
      " [1115  226 1339 ... 1469  997 1920]\n",
      " [ 503  220  205 ...  508  110  123]\n",
      " ...\n",
      " [2407 2553  936 ... 2422 2126 2582]\n",
      " [2039 1470  461 ...   -1   -1   -1]\n",
      " [1746 2922  610 ...  816   93 1239]]\n"
     ]
    }
   ],
   "source": [
    "# Populate the Graph from the saved faiss NSG index file, then dump it.\n",
    "graph.load_from_faiss(index_file_path)\n",
    "graph.print()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[  4096 135176 135696 135836]\n",
      "[ -6.814636 -13.958596 -10.036698 ...  23.633314  21.895636  27.194921]\n",
      "[[ -6.814636  -13.958596  -10.036698  ...  33.20695   -12.340407\n",
      "  -14.954563 ]\n",
      " [ 82.282974  -13.058938  -10.233725  ...  -9.216471   22.241264\n",
      "   18.251764 ]\n",
      " [-20.579313   -6.2813783  -8.618659  ...  71.87302    24.211761\n",
      "  -11.619771 ]\n",
      " ...\n",
      " [ 36.781063  -13.337156  -10.011109  ...  -8.600311  -12.481587\n",
      "   18.361734 ]\n",
      " [ 46.139416  -12.879691   -9.961021  ...   3.796484   24.17998\n",
      "   35.84262  ]\n",
      " [  5.5298367  -8.087718   -8.045947  ...  23.633314   21.895636\n",
      "   27.194921 ]]\n",
      "[30.217962 16.199577 10.580385  9.778535 12.078184 13.640146 15.974597\n",
      " 17.287785 33.845104 31.54921  35.147655 19.814009 16.495308 27.53277\n",
      " 32.561214 22.626284 36.562424 22.631338 36.64423  27.212704 16.191042\n",
      " 19.277597 30.132122 33.270386 29.020458 15.757306 17.712814 15.533003\n",
      " 13.733368  9.602468  9.473708 17.303083 57.31536  24.076023 13.06675\n",
      " 16.653318 23.511845 26.795458 24.225481 26.58053  62.394257 42.624184\n",
      " 35.461582 22.460863 24.10659  36.853836 36.003014 30.274387 59.30846\n",
      " 25.397903 40.133785 39.20795  26.45469  23.185238 30.211864 45.27107\n",
      " 53.103794 20.396494 24.155472 29.433067 28.029999 19.119545 14.203635\n",
      " 28.664322 56.310055 19.871244 22.60621  25.149097 24.07735  17.183664\n",
      " 13.695106 30.164259 63.204254 24.980349 36.446476 34.076797 23.818806\n",
      " 22.900612 33.16224  50.596848 58.25397  37.50044  32.731216 22.331501\n",
      " 26.773636 42.20078  39.932163 30.686317 53.830162 22.610554 13.60408\n",
      " 18.253159 27.09265  31.069788 25.728512 27.331709 29.044424 15.237104\n",
      " 16.839024 14.004757 12.396537  8.93796   9.551451 18.241873 37.17816\n",
      " 22.113619 33.318005 24.858948 15.814412 19.085934 33.172256 36.921658\n",
      " 33.032955 28.445766 32.41562  19.848536 16.78175  30.016125 36.06315\n",
      " 23.179516 29.404854 15.189937 10.73233  10.447757 13.059635 15.009542\n",
      " 16.727741 17.73365 ]\n",
      "[  0   4   8  12  16  20  24  28  32  36  40  44  48  52  56  60  64  68\n",
      "  72  76  80  84  88  92  96 100 104 108 112 116 120 124 128]\n",
      "近似原始向量: [ 17.781586    42.049248    12.053534    19.267666     0.9385462\n",
      "   1.7013845    5.7005196    6.3352566   11.280199   116.81071\n",
      "  95.778336    10.636364     1.006177     0.45781326   1.2465248\n",
      "  16.613365     3.3032799   29.293034   120.480194    24.98907\n",
      "   4.1620007    8.987225     4.1932583    1.9662952    0.7256775\n",
      "   6.2312603   94.213715     9.433811     3.7625532    7.124199\n",
      "   2.1853633    1.2996788  123.272316    23.514372     7.13767\n",
      "  29.43419     20.22346      3.960617     2.9880142   54.039383\n",
      " 119.10443     94.63948     37.7196      29.087267     0.934597\n",
      "   0.6994972    1.3781776   27.324495     5.4766197   11.033182\n",
      "  41.          58.375565    79.64796     38.822777     5.367281\n",
      "   3.6728134    2.8888893    6.782686     9.570281     5.7594814\n",
      "  22.914318    53.985916     9.849766     4.834507    84.38395\n",
      "  22.934925    39.260303    65.01518     16.62002     21.487232\n",
      "   7.152196    16.684372   121.78357     12.210732     1.4628983\n",
      "   1.1827126    1.636301     6.0806675   92.04033    118.103615\n",
      "   2.4001617    1.0772781    1.9628639   10.709163   109.89673\n",
      "  85.92444     80.2267      16.143578     8.000195     1.5332813\n",
      "   0.56762505   1.0493927   32.3953      42.410957    52.168297\n",
      "   9.436399     1.2649345    0.50651455   0.34323692   0.6752691\n",
      "   6.715909    77.607956    26.474432     6.005682     6.3134575\n",
      "   0.7856388    0.33781052   0.67814445   2.8616438   23.057533\n",
      " 112.80274    118.93425      7.4354496    1.2660427    0.57256126\n",
      "   0.9372635    4.643402    69.616135   119.74482     42.307526\n",
      "  14.530274     1.0863609    0.34066296   0.7582855    3.7481756\n",
      "  26.569344    20.173965    20.536497  ]\n",
      "dataset: [ 12.  47.  14.  25.   2.   3.   4.   7.  14. 122.  90.   7.   0.   0.\n",
      "   6.  14.   0.  24. 122.  22.   2.   4.   6.   0.   0.  10.  93.  10.\n",
      "   6.   6.   0.   0. 122.  31.   9.  23.  19.   9.   8.  56. 122. 100.\n",
      "  29.  19.   3.   0.   0.  25.   3.   9.  43.  59.  76.  32.   0.   0.\n",
      "   8.   6.  10.   7.  24.  58.   1.   1.  81.  23.  32.  68.  14.  19.\n",
      "  10.  23. 122.  13.   2.   1.   4.   9.  86. 122.   3.   0.   0.   8.\n",
      " 122.  95.  68.  30.   9.   2.   0.   2.  26.  50.  44.  13.   0.   0.\n",
      "   0.   3.  12.  82.  18.   7.   6.   0.   0.   0.   2.  20. 112. 122.\n",
      "   6.   5.   1.   0.   3.  69. 122.  43.  15.   1.   0.   0.   0.  27.\n",
      "  29.  21.]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "np.float32(46.91967)"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Test: read DiskANN's PQ data and reconstruct approximate original vectors.\n",
    "import numpy as np\n",
    "\n",
    "def read_pq_pivots(pivots_file):\n",
    "    \"\"\"Read the centroid data stored in a _pq_pivots.bin file.\"\"\"\n",
    "    with open(pivots_file, 'rb') as f:\n",
    "        # Read the metadata entry count and dimension\n",
    "        metadata_num = np.fromfile(f, dtype=np.uint32, count=1)[0]  # number of entries\n",
    "        metadata_dim = np.fromfile(f, dtype=np.uint32, count=1)[0]  # dimension (expected to be 1)\n",
    "        assert metadata_dim == 1, f\"元数据维度应为1，但实际为{metadata_dim}\"\n",
    "        \n",
    "        # Read the metadata offsets (read them first, then use them)\n",
    "        metadata_offset = np.fromfile(f, dtype=np.uint64, count=metadata_num)\n",
    "        print(metadata_offset)\n",
    "        \n",
    "        # Read the full pivot data (centroid table)\n",
    "        f.seek(metadata_offset[0])\n",
    "        num_centroids = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        dim_per_centroid = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        centroids = np.fromfile(f, dtype=np.float32, count=num_centroids * dim_per_centroid)\n",
    "        print(centroids)\n",
    "        centroids = centroids.reshape(num_centroids, dim_per_centroid)\n",
    "        print(centroids)\n",
    "        \n",
    "        # Read the global centroid vector (used for re-centering)\n",
    "        f.seek(metadata_offset[1])\n",
    "        num_global_centroid = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        dim_per_global_centroid = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        global_centroid = np.fromfile(f, dtype=np.float32, count=num_global_centroid*dim_per_global_centroid)\n",
    "        print(global_centroid)\n",
    "        \n",
    "        # Read the chunk offsets (subspace partition boundaries)\n",
    "        f.seek(metadata_offset[2])\n",
    "        num_chunk_offsets = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        dim_chunk_offsets = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "        chunk_offsets = np.fromfile(f, dtype=np.uint32, count=num_chunk_offsets*dim_chunk_offsets)\n",
    "        print(chunk_offsets)\n",
    "    \n",
    "    return centroids, global_centroid, chunk_offsets\n",
    "\n",
    "def pq_to_vector(pq_code, centroids, global_centroid, chunk_offsets, M, K):\n",
    "    \"\"\"Decode one PQ code row back into an approximate original vector.\"\"\"\n",
    "    D = len(global_centroid)  # original vector dimensionality\n",
    "    x_approx = np.zeros(D, dtype=np.float32)\n",
    "    \n",
    "    # For each subspace m\n",
    "    for m in range(M):\n",
    "        i_m = int(pq_code[m])  # centroid index within the m-th subspace\n",
    "        \n",
    "        # Use chunk_offsets to locate the start/end of this subspace\n",
    "        start_idx = chunk_offsets[m]\n",
    "        end_idx = chunk_offsets[m + 1]\n",
    "        chunk_size = end_idx - start_idx\n",
    "        \n",
    "        # # Fetch the corresponding centroid vector from the centroid table\n",
    "        # print(\"m:\",m,\"K:\",K,\"i_m:\",i_m)\n",
    "        # centroid_idx = m * K + i_m  # index into the centroid table\n",
    "        # print(\"centroid_idx:\",centroid_idx)\n",
    "        \n",
    "        # Ensure the dimensions match\n",
    "        # assert centroids.shape[1] >= chunk_size, f\"质心维度({centroids.shape[1]})小于子空间维度({chunk_size})\"\n",
    "        # NOTE(review): this assumes each centroid row spans the full dimension D\n",
    "        # with the subspace columns at [start_idx:end_idx] — confirm against the\n",
    "        # DiskANN pivot layout.\n",
    "        x_approx[start_idx:end_idx] = centroids[i_m, start_idx:end_idx]\n",
    "    \n",
    "    # Add back the global centroid (undo the centering)\n",
    "    x_approx += global_centroid\n",
    "    \n",
    "    return x_approx\n",
    "\n",
    "# Usage example — NOTE(review): hardcoded absolute paths; adjust per machine.\n",
    "pivots_file = \"/home/ljl/Code/vector-ssd/annlite_builder/sift_base_diskann_pq32_R30/diskann_output/_pq_pivots.bin\"\n",
    "compressed_file = \"/home/ljl/Code/vector-ssd/annlite_builder/sift_base_diskann_pq32_R30/diskann_output/_pq_compressed.bin\"\n",
    "\n",
    "# Read the centroid data\n",
    "centroids, global_centroid, chunk_offsets = read_pq_pivots(pivots_file)\n",
    "\n",
    "# Parameters assumed to be known\n",
    "M = 32   # number of subspaces\n",
    "K = 256 # number of centroids per subspace\n",
    "\n",
    "pq_codes = None\n",
    "# Read the PQ code matrix (header: count and dim, then count*dim uint8 codes)\n",
    "with open(compressed_file, 'rb') as f:\n",
    "    num_pq_code = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "    dim_pq_code = np.fromfile(f, dtype=np.uint32, count=1)[0]\n",
    "    pq_codes = np.fromfile(f, dtype=np.uint8, count=num_pq_code*dim_pq_code)  # read count*dim code bytes\n",
    "    pq_codes = pq_codes.reshape(num_pq_code, dim_pq_code)\n",
    "\n",
    "node_id = 3\n",
    "\n",
    "# Reconstruct the approximate original vector\n",
    "approx_vector = pq_to_vector(pq_codes[node_id], centroids, global_centroid, chunk_offsets, M, K)\n",
    "print(\"近似原始向量:\", approx_vector)\n",
    "\n",
    "print(\"dataset:\",dataset[node_id])\n",
    "# L2 distance between dataset[node_id] and approx_vector\n",
    "def l2_distance(vector1, vector2):\n",
    "    \"\"\"Euclidean (L2) distance between two vectors.\"\"\"\n",
    "    return np.linalg.norm(vector1 - vector2)\n",
    "l2_distance(dataset[node_id], approx_vector)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PQ_index train done\n",
      "PQ_index add done\n"
     ]
    }
   ],
   "source": [
    "# Compare against faiss's own PQ implementation on the same dataset.\n",
    "PQ_index = faiss.IndexPQ(dataset.shape[1], M, 8)\n",
    "PQ_index.train(dataset)\n",
    "print(\"PQ_index train done\")\n",
    "PQ_index.add(dataset)\n",
    "print(\"PQ_index add done\")\n",
    "\n",
    "# BUGFIX: reconstruct all vectors starting at index 0 (was node_id, which both\n",
    "# requested vectors past the end of the index and misaligned\n",
    "# faiss_pq_codes[node_id] in the next cell).\n",
    "faiss_pq_codes = PQ_index.reconstruct_n(0, PQ_index.ntotal)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "FAISS PQ distance average:  61.872673\n",
      "DiskANN PQ distance average:  59.179184\n"
     ]
    }
   ],
   "source": [
    "# Average reconstruction error of faiss PQ vs. DiskANN PQ over the dataset.\n",
    "faiss_distance_list = [] # per-node distances, reused for the variance below\n",
    "diskann_distance_list = []\n",
    "for node_id in range(len(dataset)):\n",
    "    approx_vector = pq_to_vector(pq_codes[node_id], centroids, global_centroid, chunk_offsets, M, K)\n",
    "    # Compute each distance once (previously computed twice per iteration).\n",
    "    faiss_distance_list.append(l2_distance(dataset[node_id], faiss_pq_codes[node_id]))\n",
    "    diskann_distance_list.append(l2_distance(dataset[node_id], approx_vector))\n",
    "faiss_distance_sum = sum(faiss_distance_list)\n",
    "diskann_distance_sum = sum(diskann_distance_list)\n",
    "print(\"FAISS PQ distance average: \", faiss_distance_sum / len(dataset))\n",
    "print(\"DiskANN PQ distance average: \", diskann_distance_sum / len(dataset))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "127.3958\n",
      "90.90087\n"
     ]
    }
   ],
   "source": [
    "# Variance of the per-node distance lists (faiss vs. DiskANN).\n",
    "def var(values):\n",
    "    \"\"\"Return the population variance of a sequence of numbers.\"\"\"\n",
    "    # Renamed the parameter: `list` shadowed the builtin, and the local\n",
    "    # `var` shadowed the function's own name.\n",
    "    return np.var(np.array(values))\n",
    "print(var(faiss_distance_list))\n",
    "print(var(diskann_distance_list))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'float32'"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "str(l2_dataset.dtype)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
