# Implementation of the ANNLite index builder.
import numpy as np
import time
import os
import struct
import faiss
import subprocess
import matplotlib.pyplot as plt
import json

# Path of the JSON file that supplies every build parameter.
build_config_file_path = "build_config.json"

config = None
# Load all build parameters from the JSON config file.
with open(build_config_file_path, "r") as f:
    config = json.load(f)

# Index structure selection.
build_mode = config["build_mode"]
build_modes = ["annlite", "diskann", "spann", "aisaq"]
build_mode_index = build_modes.index(build_mode) if build_mode in build_modes else -1
if build_mode_index == -1:
    raise ValueError("build_mode must be annlite, diskann, spann or aisaq")

# annlite: three-layer structure.
# diskann: single layer; L2 builds a graph over the raw dataset with node_type=1, then stores the PQ table, PQ vectors and the graph.
# spann: two-layer structure; L1 uses node_type=2, there is no L2, and L3 is the same as annlite.
# aisaq: also single layer; L2 builds a graph over the raw dataset with node_type=3.

# Dataset and index parameters.
written = True # Whether to write output files; disable to avoid clobbering already-written files during testing.
evaluation = config["evaluation"] # Whether to test clustering accuracy and compute the per-layer ground-truth files.
overwrite = config["overwrite"] # Whether to overwrite an already-built index.
# Alignment granularity when writing files. Only applies to graphs; L3 needs no alignment.
alignment_size = config["alignment_size"]

dataset_path = config["dataset_path"]
query_path = config["query_path"]
gt_path = config["gt_path"]
# NOTE(review): `type` shadows the builtin; it holds the dataset's numpy dtype.
type = None
if config["feature_type"] == "uint8":
    type = np.uint8
elif config["feature_type"] == "float32":
    type = np.float32
else:
    raise ValueError("feature_type must be uint8 or float32")

print("dataset_path:", dataset_path)
print("query_path:", query_path)
print("gt_path:", gt_path)

# Clustering parameters.
cluster_count = config["cluster_count"] # Number of clusters.
train_ratio = config["train_ratio"] # Fraction of vectors used for training; e.g. 1 means train on all vectors.
cluster_redundancy = config["cluster_redundancy"] # Redundancy: each point is assigned to its nearest this-many clusters.

# Query parameters. Only used by the builder's evaluation step.
k = 10 # Number of nearest neighbors to return.
nprobe = 10 # Number of clusters probed per query.
max_query = config["max_query"] # Compute the ground-truth file only for the first this-many queries.
gt_k = config["gt_k"] # Number of neighbors stored per query in the ground-truth file.

# Graph-construction parameters.
l2_graph_R = config["l2_graph_R"]  # Average neighbor count; must not be too small.

graph_type = config["graph_type"] # Graph-construction algorithm.
# The following parameters are required when diskann (vamana) is chosen.
diskann_executor_path = config["diskann_executor_path"] # Path to the DiskANN executable.
diskann_build_L = config["diskann_build_L"]  # Candidate-list length during build; longer means higher accuracy.
diskann_search_dram_budget = 0 # Target memory size; derived later from pq_bucket and the vector count.
diskann_build_dram_limit = config["diskann_build_dram_limit"] # Memory (GB) available during build; affects build speed and accuracy — the larger the better, within system limits.
diskann_page_size = config["diskann_page_size"] # Page size of DiskANN files; only used when parsing DiskANN's output.

# Parameters for creating the PQ vectors.
pq_bucket = config["pq_bucket"] # Number of sub-vector buckets; must divide the vector dimension evenly.
pq_bit = config["pq_bit"] # Bits per code; currently only 8 bit is supported.

# Output folder.
# (Previously derived from the current working directory.)
# output_folder = os.getcwd() + "/"
output_folder = ""
output_folder += dataset_path.split("/")[-1].split(".")[0]

if(build_mode == "annlite"):
    output_folder += "_annlite"
    output_folder += "_c" + str(cluster_count)
    output_folder += "_re" + str(cluster_redundancy)
elif(build_mode == "diskann"):
    output_folder += "_diskann"
elif(build_mode == "spann"):
    output_folder += "_spann"
    output_folder += "_c" + str(cluster_count)
    output_folder += "_re" + str(cluster_redundancy)
elif(build_mode == "aisaq"):
    output_folder += "_aisaq"

output_folder += "_pq" + str(pq_bucket)
output_folder += "_R" + str(l2_graph_R)
output_folder += "_l1s" + str(config["l1_graph_sample_ratio"])
output_folder += "/"
print("output_folder:",output_folder)

# 1. Bottom-layer (L3) IVF index paths.
# Cluster-centroid file path.
cluster_center_path = output_folder+"cluster_centers.bin" # Cluster centroids; also consumed by DiskANN graph construction.
# Bottom-layer offset file path.
offset_list_path = output_folder+"offset_list.bin" # Offset array used to look up a cluster's offset inside last_layer. Could be fixed-length to avoid index overhead.
# Bottom-layer cluster file.
last_layer_path = output_folder+"last_layer.bin" # Vectors stored per cluster.

# 2. Middle-layer (L2) index paths.
pq_vector_path = output_folder+"centroid_pq_vector.bin" # PQ codes of the cluster-centroid vectors.
pq_table_path = output_folder+"centroid_pq_table.bin" # PQ codebook of the cluster-centroid vectors.
pq_centorid_path = output_folder+"centroid_pq_centroid.bin" # PQ global centroid; holds a single vector (all zeros unless the graph was built by DiskANN).
l2_graph_root_path = output_folder+"L2_graph.bin" # Base path only; a suffix such as .type0 may be appended per graph node type.
l2_gt_path = output_folder+"L2_graph_gt.bin"
diskann_output_path = output_folder+"diskann_output/"
# The searcher can fetch a cluster centroid by id through two interchangeable interfaces:
# either read the vector directly from cluster_center_path,
# or reconstruct it with the PQ quantizer.
# Implementing both makes it easy to measure how much accuracy the quantizer loses.

# 3. Top-layer (L1) index paths.
l1_graph_path = output_folder+"L1_graph.bin" # Adjacency of the top-layer graph.
l1_gt_path = output_folder+"L1_graph_gt.bin"
l1_graph_vector_path = output_folder+"L1_graph_vector.bin" # Vectors of the top-layer graph; overlaps with L2's vectors and could be deduplicated later.
l1_l2_graph_node_mapping_path = output_folder+"L1_L2_graph_node_mapping.bin" # Mapping from L1 graph nodes to L2 graph nodes.
l1_graph_sample_ratio = config["l1_graph_sample_ratio"] # Sampling ratio, e.g. 0.1 keeps 10% of each cluster's nodes. Under ncs sampling it is the total L1 node count instead.
l1_simpling_method = ["random", "ncs"][1] # random: plain random sampling; ncs: Neighbor-Capturing Sampling — randomly sample a few points, then also sample their neighbors.
l1_ncs_threshold = 1 # Once this fraction of a sampled node's neighbors are themselves sampled, the node is likewise marked as not needing a visit.
l1_cachelist_path = output_folder+"L1_cache_list.bin" # Marks which L1 nodes are fully cached so L2 need not revisit them.
l1_graph_R = l2_graph_R # Kept equal to the L2 graph's R for now; could be tuned separately later.
# l1_graph_node_type = 2 # Same semantics as L2's node_type: 1 embeds raw vectors, 2 embeds PQ vectors; 2 is the default (annlite).

# Config file paths used to pass parameters to the searcher.
config_path = output_folder+"dataset_config.json"
user_config_path = output_folder+"user_config.json" 

# Re-derive per-mode graph node types.
if(build_mode == "annlite"):
    print("build_mode: annlite")
    l1_graph_node_type = 2
    l2_graph_node_type = 4
elif(build_mode == "diskann"):
    print("build_mode: diskann")
    l1_graph_node_type = None
    l2_graph_node_type = 1
elif(build_mode == "spann"):
    print("build_mode: spann")
    l1_graph_node_type = None
    l2_graph_node_type = 2 # No L1; the L2 graph stays resident in memory and embeds the vector data.
elif(build_mode == "aisaq"):
    print("build_mode: aisaq")
    l1_graph_node_type = None
    l2_graph_node_type = 4

# Create output_folder if it does not exist yet.
if not os.path.exists(output_folder):
    os.makedirs(output_folder)
else:
    if overwrite:
        print("Warning: output_folder exists, will overwrite it")
    else:
        raise Exception(f"output_folder {output_folder} exists, please set overwrite to 1")

# Verify the input files exist.
# NOTE(review): Exception(...) with multiple arguments does not format a message;
# the raised exception shows the args as a tuple.
if not os.path.exists(dataset_path):
    raise Exception("dataset_path:",dataset_path,"not exists")
if evaluation and not os.path.exists(query_path):
    raise Exception("query_path:",query_path,"not exists")
if evaluation and not os.path.exists(gt_path):
    raise Exception("gt_path:",gt_path,"not exists")

# Read a .bin-format vector file. Works for the dataset, the query set and the GT set alike.
def read_bin(file_path, type):
    """Load a .bin file: uint32 row count, uint32 dim, then rows*dim packed values of dtype `type`."""
    total_bytes = os.path.getsize(file_path)
    print("Read data from", file_path, "\nfile size:", total_bytes)
    item_bytes = np.dtype(type).itemsize
    with open(file_path, "rb") as fd:
        lines = int.from_bytes(fd.read(4), byteorder='little')
        dim = int.from_bytes(fd.read(4), byteorder='little')
        print("lines:", lines, "dim:", dim)

        payload_bytes = lines * dim * item_bytes
        # Simple sanity check: the header must be consistent with the file size.
        if payload_bytes + 8 != total_bytes:
            raise Exception(f"Error! file size {total_bytes} and argument {payload_bytes+8} not match!")

        vectors = np.frombuffer(fd.read(payload_bytes), dtype=type).reshape(lines, dim)
        print("Returned vector list:", vectors.shape, vectors.dtype)
        return vectors
    
# Compute recall@k. I holds the search results, gts the ground truth.
def compute_recall(I, gts, k):
    """Return the mean, over queries, of |results ∩ top-k ground truth| / k."""
    num_queries = I.shape[0]
    per_query = [
        len(set(I[row]).intersection(set(gts[row][:k]))) / k  # hit fraction for one query
        for row in range(num_queries)
    ]
    return sum(per_query) / num_queries

# Write a 2-D vector array to a .bin file; mainly used to persist centroids.
def write_bin(filename, array):
    """Serialize `array` as: uint32 rows, uint32 cols, then the raw element bytes."""
    if not isinstance(array, np.ndarray):
        raise ValueError("Input must be a NumPy array.")

    n_rows, n_cols = array.shape
    with open(filename, 'wb') as out:
        # 8-byte header: row count then column count, both uint32.
        np.array(n_rows, dtype=np.uint32).tofile(out)
        np.array(n_cols, dtype=np.uint32).tofile(out)
        # Payload: the array's own bytes, in its own dtype.
        array.tofile(out)

# Compute the ground-truth file of a query set over `dataset`; used to measure per-layer recall.
def compute_gt(dataset, query_set, gt_file_path, k):
    """Brute-force exact k-NN (L2 distance) for every query and write the ids to gt_file_path.

    File layout: uint32 n_queries, uint32 k, then the n_queries*k uint32 id matrix.
    """
    print("Computing ground truth on dataset including", dataset.shape[0], "vectors for", query_set.shape[0], "queries and", k, "ground truths per query")
    # Record the start time.
    start_time = time.time()

    # BUGFIX: cast once up front — subtracting uint8 arrays wraps around and corrupts
    # the distances (same fix compute_gt_multithread already applies per query).
    dataset_f32 = dataset.astype('float32')

    gt_array = np.zeros((query_set.shape[0], k), dtype=np.uint32)
    for i in range(query_set.shape[0]):
        if(i % 1000 == 0):
            print("Computed ground truth for query", i)
        query = query_set[i]
        distances = np.linalg.norm(dataset_f32 - query, axis=1)
        indices = np.argsort(distances)[:k]
        gt_array[i] = indices

    with open(gt_file_path, 'wb') as f:
        np.array(gt_array.shape[0], dtype=np.uint32).tofile(f)
        np.array(gt_array.shape[1], dtype=np.uint32).tofile(f)
        gt_array.tofile(f)

    print("Ground truth file saved to", gt_file_path)
    end_time = time.time()
    print(f"Time taken: {end_time - start_time} seconds")

from concurrent.futures import ThreadPoolExecutor
import threading
def compute_gt_multithread(dataset, query_set, gt_file_path, k, num_threads=16):
    """
    Compute the ground-truth file of a query set over `dataset`; used to measure per-layer recall.

    Args:
        dataset: dataset vectors
        query_set: query vectors
        gt_file_path: path to save the ground-truth file
        k: number of nearest neighbors per query
        num_threads: number of worker threads

    File layout: uint32 n_queries, uint32 k, then the n_queries*k uint32 id matrix.
    """
    print(f"Computing ground truth on dataset including {dataset.shape[0]} vectors for {query_set.shape[0]} queries and {k} ground truths per query")
    # Record the start time.
    start_time = time.time()

    # Cast once: computing distances directly on uint8 wraps around and corrupts results.
    # PERF FIX: hoisted out of the per-query loop — the original re-copied the entire
    # dataset to float32 for every single query.
    dataset_f32 = dataset.astype('float32')

    gt_array = np.zeros((query_set.shape[0], k), dtype=np.uint32)

    # Number of queries handled per thread.
    queries_per_thread = query_set.shape[0] // num_threads
    if queries_per_thread == 0:
        queries_per_thread = 1
        num_threads = min(num_threads, query_set.shape[0])

    # Shared progress counter and its lock.
    processed_count = 0
    count_lock = threading.Lock()

    # Worker: brute-force k-NN for queries [start_idx, end_idx).
    def process_queries(start_idx, end_idx):
        nonlocal processed_count
        for i in range(start_idx, end_idx):
            diff = dataset_f32 - query_set[i]
            distances = np.linalg.norm(diff, axis=1)
            gt_array[i] = np.argsort(distances)[:k]

            # Update the counter and report progress periodically.
            with count_lock:
                processed_count += 1
                if processed_count % 1000 == 0:
                    print(f"Computed ground truth for {processed_count} queries")

    # Fan the work out over a thread pool.
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        futures = []
        for t in range(num_threads):
            start_idx = t * queries_per_thread
            end_idx = start_idx + queries_per_thread
            if t == num_threads - 1:
                end_idx = query_set.shape[0]  # the last thread takes the remainder

            if start_idx < end_idx:
                futures.append(executor.submit(process_queries, start_idx, end_idx))

        # Wait for all workers (re-raises any worker exception).
        for future in futures:
            future.result()

    # Persist the result.
    with open(gt_file_path, 'wb') as f:
        np.array(gt_array.shape[0], dtype=np.uint32).tofile(f)
        np.array(gt_array.shape[1], dtype=np.uint32).tofile(f)
        gt_array.tofile(f)

    print(f"Ground truth file saved to {gt_file_path}")    
    end_time = time.time()
    print(f"Time taken: {end_time - start_time} seconds")

# Load the dataset.
dataset = read_bin(dataset_path, type)
print("read dataset of shape", dataset.shape)

queries = None
gts = None
if(evaluation):
    queries = read_bin(query_path, type)
    print("read query of shape", queries.shape)

    gts = read_bin(gt_path, np.uint32)
    print("read gt of shape", gts.shape)

# Build the IVF index with faiss' index factory.
# NOTE(review): the f-prefix below is redundant — the string is %-formatted — but harmless.
IVF_index = faiss.index_factory(dataset.shape[1], f"IVF%d,Flat"%cluster_count)  # Create an IVF index.

if(build_mode == "annlite" or build_mode == "spann"):
    # Train the IVF index (k-means over the training subset → L3 clusters).
    print("Start train of L3 cluster")
    st = time.time()
    if train_ratio < 1 :
        step = int(1/train_ratio) # Stride: take every step-th vector for training.
        IVF_index.train(dataset[::step])
    else :
        IVF_index.train(dataset)
    et = time.time()
    print("Train done, spend: %.1f second"%(et-st))

    # Add all vectors to the index.
    print("Start add vector")
    st = time.time()
    IVF_index.add(dataset)
    et = time.time()
    print("Add done, spend: %.1f second"%(et-st))

    # Save the index.
    # Derive a name from the dataset file.
    dataset_name = dataset_path.split("/")[-1].split(".")[0]
    if(written):
        print("Writing index")
        # faiss.write_index(IVF_index, output_folder+"ivf_index_%s_%d.faiss"%(dataset_name,cluster_count)) # The saved faiss index is not essential and can be skipped.
        print("Write done")
    else:
        print("Not write index")

# Recall test of the third layer; doubles as a study of how cluster count affects recall.
if(build_mode == "annlite" or build_mode == "spann"):
    if(evaluation):
        for i in [1,2,4,8,16,32]: # Number of clusters probed during search.
            IVF_index.nprobe = i
            D, I = IVF_index.search(queries, k)  # Run the actual search.
            # Square root of the distances (faiss returns squared L2) — currently disabled.
            # D = np.sqrt(D)
            print("recall test of nprobe",i,":",compute_recall(I, gts, k))

# Grab the quantizer, which holds the cluster centroids.
quantizer = IVF_index.quantizer
centroids = np.zeros((cluster_count, dataset.shape[1]), dtype=np.float32)
if(build_mode == "annlite" or build_mode == "spann"):
    quantizer.reconstruct_n(0, cluster_count, centroids)
    print("centroids shape:", centroids.shape)
    print("centroids dtype:", centroids.dtype)

    # Persist the centroids.
    if(written):
        write_bin(cluster_center_path, centroids)
        print("cluster center has written to:",cluster_center_path)

# Get the cluster id one vector belongs to (kept for reference).
# def get_cluster_id_for_vector(vector, quantizer):
#     # Use search to find the owning cluster id.
#     _, cluster_id = quantizer.search(np.expand_dims(vector, axis=0), 1) # The second argument is the number of results.
#     return cluster_id[0][0]

# Look up, in batch, the cluster id(s) each vector belongs to.
def get_cluster_id_for_vectors(vectors, quantizer, retrieve_k):
    """Return the ids of the `retrieve_k` nearest clusters for every row of `vectors`."""
    # quantizer.search returns (distances, ids); only the ids matter here.
    # (Flattening the result was considered but is left to the caller.)
    unused_distances, cluster_ids = quantizer.search(vectors, retrieve_k)
    return cluster_ids

# Walk every vector's cluster assignment(s) and count, per cluster, how many land in it.
def statis_cluster(vectors, quantizer, retrieve_k):
    """Return (per-cluster hit counts, max count, min count).

    NOTE(review): relies on the module-level `cluster_count` global for the array length.
    """
    assignments = get_cluster_id_for_vectors(vectors, quantizer, retrieve_k)  # shape: (n_vectors, retrieve_k)
    cluster_ids_statis = [0 for _ in range(cluster_count)]
    for row in assignments:
        for cid in row:
            cluster_ids_statis[cid] += 1
    return cluster_ids_statis, max(cluster_ids_statis), min(cluster_ids_statis)

if(build_mode == "annlite" or build_mode == "spann"):
    # ids = get_cluster_id_for_vectors(dataset,quantizer)
    statis_result, max_value, min_value = statis_cluster(dataset, quantizer, cluster_redundancy)

    # statis_result is a per-cluster occurrence count; a histogram could be plotted:
    # plt.hist(statis_result, bins=100)
    # plt.show()

    # Report the largest and smallest cluster sizes.
    print("聚类元素数量最大值：", max_value, "最小值：", min_value)

if(build_mode == "annlite" or build_mode == "spann"):
    # Cluster id(s) of every vector in the dataset.
    cluster_ids_of_dataset = get_cluster_id_for_vectors(dataset,quantizer,cluster_redundancy)

    # Group vector ids by their assigned centroid id into a 2-D structure.
    # Initialize one empty list per cluster.
    clustered_dataset = [[] for i in range(cluster_count)]

    # for i in range(len(ids)):
    #     clustered_dataset[ids[i]].append(i)
    for node_id in range(cluster_ids_of_dataset.shape[0]):
        for cid_index in range(cluster_ids_of_dataset.shape[1]):
            clustered_dataset[cluster_ids_of_dataset[node_id][cid_index]].append(node_id)

    # Per-cluster element counts as an np array.
    clustered_dataset_statis = np.array([len(i) for i in clustered_dataset])

    # Offset array: offset_list[i] = number of entries stored before cluster i.
    # NOTE(review): the repeated np.sum makes this O(cluster_count^2); np.cumsum would be O(n).
    offset_list = np.zeros(shape=(cluster_count+1,),dtype=np.int32)
    for i in range(cluster_count):
        offset_list[i] = np.sum(clustered_dataset_statis[:i])
    offset_list[-1] = cluster_ids_of_dataset.size

    # Write the offset array as binary (header: length, 1).
    with open(offset_list_path,'wb') as f:
        f.write(struct.pack('i',offset_list.shape[0]))
        f.write(struct.pack('i',1))
        f.write(offset_list.tobytes())
    print("Write offset list to file")

    # Write the dataset cluster by cluster. Each vector is prefixed by 4 bytes holding
    # its original id (clustering shuffles the original order).
    with open(last_layer_path,'wb') as f:
        # For every cluster, write its member vectors.
        for line in clustered_dataset:
            for id in line:
                # Write the original vector id.
                f.write(struct.pack('i',id))
                # Write the raw bytes of dataset[id].
                f.write(dataset[id].tobytes())
    print("Write last layer to file")

# Build the PQ vectors. With nsg we use faiss; with vamana the PQ vectors are read from DiskANN's output files instead.
pq_codes = None
if(graph_type == "nsg"):
    PQ_index = faiss.IndexPQ(dataset.shape[1], pq_bucket, pq_bit)
    pq_dataset = None
    # annlite/spann PQ-compress the cluster centroids; DiskANN/AISAQ build the graph on
    # the raw vectors and PQ-compress those instead.
    if(build_mode == "annlite" or build_mode == "spann"):
        pq_dataset = centroids
    elif(build_mode == "diskann" or build_mode == "aisaq"):
        pq_dataset = dataset
    print("PQ_index train start, shape:",pq_dataset.shape)
    # Train the product quantizer on the chosen vectors.
    PQ_index.train(pq_dataset)
    print("PQ_index train done")
    PQ_index.add(pq_dataset)
    print("PQ_index add done")

    # Extract the PQ-compressed vectors as a NumPy array.
    # faiss stores each vector's compressed representation packed into uint8;
    # code length per vector = bucket count * code bits / 8.
    pq_codes = faiss.vector_to_array(PQ_index.codes)
    pq_codes = pq_codes.reshape(pq_dataset.shape[0], pq_bucket)
    print("pq_codes.shape",pq_codes.shape,"pq_codes.dtype",pq_codes.dtype)

    pq_table = PQ_index.pq 
    pq_table = faiss.vector_to_array(pq_table.centroids)
    pq_table = pq_table.reshape(pq_bucket, 2**pq_bit, -1)
    print("pq_table.shape",pq_table.shape,"pq_table.dtype",pq_table.dtype)
    pq_table = pq_table.reshape(pq_bucket, -1)

    # Write the PQ table and the PQ-compressed vectors.
    write_bin(pq_vector_path, pq_codes)
    write_bin(pq_table_path, pq_table)

    # Write a blank PQ global centroid to stay compatible with DiskANN's PQ decompression.
    pq_centorid = np.zeros((1,dataset.shape[1]),dtype=np.float32) # dtype after uint8 clustering is unclear; float32 for now
    write_bin(pq_centorid_path, pq_centorid)
    
# With vamana graphs, the PQ vectors can only be obtained after graph construction.

# Graph container used to hold a graph in memory and serialize it to disk.
# The adjacency is an (n, max_neighbor) int32 np array; unused slots hold -1.
class Graph:
    def __init__(self, n, max_neighbor):
        """Create an empty graph with `n` nodes and a fixed fan-out of `max_neighbor`."""
        # All neighbor slots start at -1, meaning "empty slot".
        self.graph = np.full((n, max_neighbor), -1, dtype=np.int32)        

        self.n = n
        self.max_neighbor = max_neighbor
        self.neighbor_table_size = self.max_neighbor * 4  # bytes: one int32 per slot
        self.vector_size = 0  # plain graph by default (node_type 0: no payload)
        self.node_type = 0
        self.vectors = None
        self.raw_vectors = None  # only used for node_type 4: a raw vector appended after the payload

    def update_node_type(self, vectors, type, raw_vectors=None):
        """Attach the per-node payload `vectors` and set the node type.

        node_type semantics (see the build-mode notes at the top of the file):
        0 = adjacency only, 1 = raw vectors (DiskANN-like), 2 = PQ codes,
        3/4 = neighbors' PQ codes (AiSAQ-like); type 4 additionally appends the
        raw vector taken from `raw_vectors`.
        """
        self.node_type = type
        self.vectors = vectors  # per-node payload; None means no payload was supplied
        if self.vectors is not None:  # validate the payload row count
            assert self.vectors.shape[0] == self.n
            print(f'Update graph vector shape: {self.vectors.shape} in type {self.vectors.dtype}')
            self.vectors = self.vectors.reshape((self.n, -1))
            self.vector_size = self.vectors.shape[1] * self.vectors.itemsize
        
        if self.node_type == 4:  # node_type 4 stores the raw vector as well
            self.raw_vectors = raw_vectors
            self.node_size = self.neighbor_table_size + self.vector_size + self.raw_vectors.shape[1] * self.raw_vectors.itemsize
            print(f"Updated graph node size(B): {self.node_size}, neighbor_table_size(B): {self.neighbor_table_size}, vector_size(B): {self.vector_size}, raw_vectors_size(B): {self.raw_vectors.shape[1] * self.raw_vectors.itemsize}")
        else:
            self.node_size = self.neighbor_table_size + self.vector_size
            print(f"Updated graph node size(B): {self.node_size}, neighbor_table_size(B): {self.neighbor_table_size}, vector_size(B): {self.vector_size}")

    # Build the AiSAQ-style payload: each node stores its neighbors' PQ codes inline.
    def build_AiSAQ(self, pq_codes):
        """Return an (n, max_neighbor, code_len) array of each node's neighbors' PQ codes.

        Empty neighbor slots (-1) keep all-zero codes.
        """
        neibor_pq_codes = np.zeros((self.n, self.max_neighbor, pq_codes.shape[1]), dtype=pq_codes.dtype)
        for i in range(self.n):
            for j in range(self.max_neighbor):
                if self.graph[i][j] != -1:
                    neibor_pq_codes[i][j] = pq_codes[self.graph[i][j]]
        return neibor_pq_codes

    def print(self, print_all=False):
        """Print the adjacency matrix (truncated unless print_all=True)."""
        threshold = 3
        if print_all:
            threshold = np.inf
        np.set_printoptions(threshold=threshold)
        print(f'graph shape: {self.graph.shape}')
        print(self.graph)
        np.set_printoptions(threshold=3)

    def save_as_bin(self, path, alignment=4096):  # alignment granularity when writing
        """Serialize the graph: one header page, then nodes packed node_per_page per aligned page.

        Each node record is [neighbor table][payload vector?][raw vector?].
        """
        node_per_page = alignment // self.node_size
        print(f"Alignment size: {alignment}, self.node_size: {self.node_size}, node per page: {node_per_page}")
        if node_per_page <= 0:
            raise ValueError("Alignment size must be greater than node size")

        with open(path, 'wb') as f:
            # Header fields, all uint32.
            f.write(struct.pack('I', self.n))
            f.write(struct.pack('I', self.max_neighbor))
            f.write(struct.pack('I', self.node_type))
            f.write(struct.pack('I', node_per_page))
            f.write(struct.pack('I', self.neighbor_table_size))
            f.write(struct.pack('I', self.vector_size))
            if self.raw_vectors is not None:
                f.write(struct.pack('I', self.raw_vectors.shape[1] * self.raw_vectors.itemsize))  # raw-vector byte size
                # Also record the raw-vector dtype: even for the same graph type the
                # vector dtype can differ between index structures.
                dtype_str = str(self.raw_vectors.dtype)  # e.g. 'float32'
                dtype_bytes = dtype_str.encode('utf-8')  # e.g. b'float32' (6 bytes)
                # Pad with NUL bytes (or truncate) to exactly 8 bytes.
                fixed_bytes = dtype_bytes.ljust(8, b'\x00')[:8]
                f.write(fixed_bytes)

            f.seek(alignment)  # the first page is reserved for the header only

            written_vectors = 0
            for i in range(self.n):
                f.write(self.graph[i].tobytes())
                if self.vectors is not None:
                    f.write(self.vectors[i].tobytes())
                if self.raw_vectors is not None:
                    f.write(self.raw_vectors[i].tobytes())

                written_vectors += 1
                if written_vectors >= node_per_page:
                    # Page full: jump to the start of the next aligned page.
                    written_vectors = 0
                    offset = alignment * ((i+1) // node_per_page + 1)
                    # print("jump to",offset)
                    f.seek(offset)

        print(f"Graph saved to {path}")

    # Load the graph structure from a faiss-written NSG index file.
    def load_from_faiss(self, path):
        """Parse a faiss NSG file into self.graph; return the average degree.

        The adjacency starts at byte 83 as int32 neighbor ids, each node's list
        terminated by 0xFFFFFFFF.
        NOTE(review): the fixed 83-byte offset looks faiss-version-specific —
        verify against the faiss build in use.
        """
        total_neighbors_count = 0
        total_nodes = 0
        with open(path, 'rb') as f:
            f.seek(83)
            for i in range(self.n):
                neighbors = []
                neighbors_count = 0
                while True:
                    neighbor = struct.unpack('I', f.read(4))[0]
                    if neighbor == 0xFFFFFFFF:
                        break
                    neighbors.append(neighbor)
                    neighbors_count += 1
                self.graph[i][:neighbors_count] = neighbors[:neighbors_count]
                total_neighbors_count += neighbors_count
                total_nodes += 1
        
        print(f'total nodes: {total_nodes}, average neighbors: {total_neighbors_count/total_nodes}')
        return total_neighbors_count/total_nodes

    # Load the graph structure from a DiskANN disk-index file.
    # graph_vector_type is the dtype of the vectors embedded in the DiskANN file;
    # it may differ from other node vector types and must be handled separately.
    def load_from_diskann(self, path, graph_vector_type):
        """Parse a DiskANN _disk.index file into self.graph; return the average degree.

        Relies on the module-level `diskann_page_size` global for page alignment.
        """
        print("load graph from diskann of path:",path)
        total_neighbors_count = 0
        total_nodes = 0  # nodes actually read from the file
        with open(path, 'rb') as f:
            # Read the header.
            f.seek(8,0)
            total_nodes_in_head = struct.unpack('Q', f.read(8))[0]
            if total_nodes_in_head != self.n:
                # BUGFIX: this used to call input() and then an undefined pause(),
                # which raised NameError instead of reporting the mismatch.
                raise ValueError(f"Total nodes in head ({total_nodes_in_head}) is not equal to n ({self.n})")
            dim_in_head = struct.unpack('Q', f.read(8))[0]
            vector_size_in_diskann = dim_in_head*graph_vector_type.itemsize
            entry_point_id = struct.unpack('Q', f.read(8))[0]
            node_size_in_diskann = struct.unpack('Q', f.read(8))[0]
            node_count_per_page = struct.unpack('Q', f.read(8))[0]  # nodes per page; needed while reading
            f.seek(72,0)
            file_size = struct.unpack('Q', f.read(8))[0]
            print(f"file size: {file_size}, node size: {node_size_in_diskann}, node count per page: {node_count_per_page}, vector_size_in_diskann: {vector_size_in_diskann}")
            
            # Read the neighbor lists.
            current_page = 1  # page currently being read (page 0 is the header)
            f.seek(current_page*diskann_page_size,0)
            for i in range(self.n):
                f.seek(vector_size_in_diskann, 1)  # skip the embedded raw vector
                neighbors_count = struct.unpack('I', f.read(4))[0]
                neighbors = []
                for n_index in range(neighbors_count):
                    neighbor_id = struct.unpack('I', f.read(4))[0]
                    neighbors.append(neighbor_id)
                self.graph[i][:neighbors_count] = neighbors[:neighbors_count]
                total_neighbors_count += neighbors_count

                total_nodes += 1
                if total_nodes%node_count_per_page == 0:
                    current_page += 1
                    f.seek(current_page*diskann_page_size,0)
        
        print(f'Load graph from diskann success, total nodes: {total_nodes}, average neighbors: {total_neighbors_count/total_nodes}')
        return total_neighbors_count/total_nodes

# Build the L2 graph.
# 1. Choose whether to build over the cluster centroids or the raw dataset, per build mode.
l2_dataset = None
l2_dataset_path = None
if(build_mode == "annlite" or build_mode == "spann"):
    l2_dataset = centroids
    l2_dataset_path = cluster_center_path
elif(build_mode == "diskann" or build_mode == "aisaq"):
    l2_dataset = dataset
    l2_dataset_path = dataset_path
l2_graph = Graph(l2_dataset.shape[0], l2_graph_R)

# 2. Compute the L2-layer ground truth.
if evaluation and (build_mode == "annlite" or build_mode == "spann"):
    # Skip if l2_gt_path was already created.
    if not os.path.exists(l2_gt_path):
        compute_gt_multithread(l2_dataset, queries, l2_gt_path, gt_k)

# 3. Build the graph and load its structure.
l2_average_neibors = 0.0
if graph_type == "nsg":
    # Build an NSG index with degree R.
    NSG_index = faiss.IndexNSGFlat(dataset.shape[1], l2_graph_R)  # Create the NSG index; degree = R.
    # Adding the vectors builds the graph automatically.
    NSG_index.add(l2_dataset)
    # Dump to a file so the graph structure can be parsed back out.
    temp_graph_path = "nsg_index.faiss" # Temporary file, only used to convert to our graph format.
    faiss.write_index(NSG_index, temp_graph_path)
    # Parse the graph.
    l2_average_neibors = l2_graph.load_from_faiss(temp_graph_path)
elif graph_type == "vamana":
    if not os.path.exists(diskann_output_path):
        os.makedirs(diskann_output_path)
    # Derive diskann_search_dram_budget from pq_bucket and the vector count.
    diskann_search_dram_budget = ((pq_bucket+0.5) * l2_dataset.shape[0])/(1024**3)
    diskann_search_dram_budget = round(diskann_search_dram_budget,6)
    # DiskANN documentation's formula (kept for reference):
    # diskann_search_dram_budget = (l2_dataset.shape[0] * pq_bucket) / 2**30  + (10000*(4*l2_graph_R + np.dtype(type).itemsize*l2_dataset.shape[1])) / 2**30
    # Assemble the DiskANN index-build command line.
    diskann_build_cmd  = diskann_executor_path
    diskann_build_cmd += " --data_type"
    if(l2_dataset.dtype == np.float32):
        diskann_build_cmd += " float"
    elif(l2_dataset.dtype == np.uint8):
        diskann_build_cmd += " uint8"
    else:
        # NOTE(review): input() accepts a single argument — this call raises TypeError if reached.
        input("Unsupported data type!",l2_dataset.dtype)
    diskann_build_cmd += " --dist_fn l2" # Only the L2 distance metric is handled for now.
    diskann_build_cmd += " --data_path " + l2_dataset_path
    diskann_build_cmd += " --index_path_prefix " + diskann_output_path
    diskann_build_cmd += " -R %d"%l2_graph_R
    diskann_build_cmd += " -L%d"%diskann_build_L
    diskann_build_cmd += " -B %f"%diskann_search_dram_budget
    diskann_build_cmd += " -M %d"%diskann_build_dram_limit

    # Run the build command.
    print("[ANNLite Builder]Build DiskANN by cmd: ",diskann_build_cmd)
    os.system(diskann_build_cmd)
    print("[ANNLite Builder]Build DiskANN Done")

    # Parse the resulting graph.
    l2_average_neibors = l2_graph.load_from_diskann(diskann_output_path+"_disk.index", l2_dataset.dtype)
else:
    input("Graph type not supported.")

# 4. Inject vectors into the graph according to the node type.
node_type = l2_graph_node_type
# node_type = 0
graph_vector = None
if node_type == 0:
    graph_vector = None
elif node_type == 1:
    graph_vector = l2_dataset # Like DiskANN
elif node_type == 2:
    if(graph_type == "vamana"):
        pq_codes = read_bin(diskann_output_path+"_pq_compressed.bin", np.uint8)
    graph_vector = pq_codes
elif node_type == 3 or node_type == 4:
    print("Building AiSAQ...")
    if(graph_type == "vamana"):
        pq_codes = read_bin(diskann_output_path+"_pq_compressed.bin", np.uint8)
    graph_vector = l2_graph.build_AiSAQ(pq_codes) # Like AiSAQ
    print("Building AiSAQ Done.")
l2_graph.update_node_type(graph_vector, node_type, l2_dataset) # Attach the vectors so they are serialized with the graph.

# l2_graph.print()

# Write the graph to disk.
# l2_graph.save_as_bin(l2_graph_root_path+f".type{node_type}", alignment_size)
l2_graph.save_as_bin(l2_graph_root_path, alignment_size)

# Helpers for reading DiskANN's PQ table files.
def read_pq_pivots(pivots_file):
    """Read the centroid data from a DiskANN _pq_pivots.bin file.

    Returns (diskann_centroids, global_centroid, chunk_offsets):
      - diskann_centroids: (num_centroids, dim) float32 pivot table
      - global_centroid: the global centroid, reshaped to (-1, count)
      - chunk_offsets: uint32 sub-space boundary offsets
    """
    with open(pivots_file, 'rb') as f:
        # Metadata header: entry count and entry dimension.
        metadata_num = np.fromfile(f, dtype=np.uint32, count=1)[0]  # entry count
        metadata_dim = np.fromfile(f, dtype=np.uint32, count=1)[0]  # entry dimension
        assert metadata_dim == 1, f"元数据维度应为1，但实际为{metadata_dim}"
        
        # Metadata offsets (read first, then used for the seeks below).
        metadata_offset = np.fromfile(f, dtype=np.uint64, count=metadata_num)
        
        # Full pivot data (the centroid table).
        f.seek(metadata_offset[0])
        num_centroids = np.fromfile(f, dtype=np.uint32, count=1)[0]
        dim_per_centroid = np.fromfile(f, dtype=np.uint32, count=1)[0]
        diskann_centroids = np.fromfile(f, dtype=np.float32, count=num_centroids * dim_per_centroid)
        diskann_centroids = diskann_centroids.reshape(num_centroids, dim_per_centroid)
        
        # The global centroid vector.
        f.seek(metadata_offset[1])
        num_global_centroid = np.fromfile(f, dtype=np.uint32, count=1)[0]
        dim_per_global_centroid = np.fromfile(f, dtype=np.uint32, count=1)[0]
        global_centroid = np.fromfile(f, dtype=np.float32, count=num_global_centroid*dim_per_global_centroid)
        global_centroid = global_centroid.reshape(-1, num_global_centroid)
        
        # Chunk offsets (sub-space partition boundaries).
        f.seek(metadata_offset[2])
        num_chunk_offsets = np.fromfile(f, dtype=np.uint32, count=1)[0]
        dim_chunk_offsets = np.fromfile(f, dtype=np.uint32, count=1)[0]
        chunk_offsets = np.fromfile(f, dtype=np.uint32, count=num_chunk_offsets*dim_chunk_offsets)
    
    return diskann_centroids, global_centroid, chunk_offsets

# DiskANN's PQ pivot layout stores one full-dimensional centroid per row (typically 256 = 2^8 rows),
# while ANNLite stores one sub-space (bucket) per row containing all of that bucket's centroids.
def convert_pq_centroids(pq_centroids, pq_bucket):
    """Re-pack DiskANN PQ centroids (n_centroids, dim) into ANNLite layout (pq_bucket, n_centroids*bucket_size).

    Row b of the result is the concatenation, over all centroids, of each
    centroid's b-th sub-vector. Trailing columns that do not divide evenly into
    pq_bucket are ignored, exactly as in the original element-wise loop.
    """
    n_centroids = pq_centroids.shape[0]
    pq_bucket_size = pq_centroids.shape[1] // pq_bucket  # sub-vector length per bucket
    # Vectorized equivalent of the original double loop: trim remainder columns,
    # split each row into pq_bucket sub-vectors, then regroup by bucket instead
    # of by centroid. O(1) Python overhead instead of O(bucket * centroids).
    trimmed = np.asarray(pq_centroids, dtype=np.float32)[:, :pq_bucket * pq_bucket_size]
    new_pq_centroids = (trimmed.reshape(n_centroids, pq_bucket, pq_bucket_size)
                               .transpose(1, 0, 2)
                               .reshape(pq_bucket, n_centroids * pq_bucket_size))
    return new_pq_centroids

# With vamana graphs, the PQ vectors only become available after graph construction.
if(graph_type == "vamana"):
    # Read the PQ-compressed vectors produced by DiskANN.
    pq_codes = read_bin(diskann_output_path+"_pq_compressed.bin", np.uint8)
    # Write them back out. Essentially a copy, but reading then writing keeps the
    # pipeline uniform, flexible, and validated by read_bin's checks.
    write_bin(pq_vector_path, pq_codes)
    print("write pq codes done:",pq_vector_path)

    # Read the PQ table.
    diskann_centroids, global_centroid, chunk_offsets = read_pq_pivots(diskann_output_path+"_pq_pivots.bin")
    
    # Convert it to ANNLite's layout.
    new_centroids = convert_pq_centroids(diskann_centroids, pq_bucket)
    write_bin(pq_table_path, new_centroids)
    print("write pq table done:",pq_table_path)

    # Write the global centroid.
    write_bin(pq_centorid_path, global_centroid)
    print("write pq centroid done:",pq_centorid_path)

#Sampling section=========================================================================================
l1_graph_node_count = 0
#Seed the RNG with cluster_count so the sample stays identical while the
#cluster count is unchanged — makes performance observations reproducible.
np.random.seed(cluster_count)
if(build_mode == "annlite"):
    l1_graph_node_count = int(l2_graph.n * l1_graph_sample_ratio) #number of nodes the L1 layer should contain
    l1_graph_node_id = None #ids (into L2) of the sampled nodes, filled by the chosen sampling method below
    if(l1_simpling_method == "random"):
        # Build L1 from a uniform sample of the L2 nodes.
        l1_graph_node_id = np.random.choice(l2_graph.n, l1_graph_node_count, replace=False) #sampled ids in L2
        # Under random sampling almost no node ends up with all of its neighbors
        # cached, so write a one-entry placeholder cache list.
        # uint32 to match the dtype written by the "ncs" branch (all-zero bytes
        # either way, so the file content is unchanged).
        l1_cached_list = np.zeros((1,1), dtype=np.uint32)
        write_bin(l1_cachelist_path, l1_cached_list)
    elif(l1_simpling_method == "ncs"):
        # Neighbor-complete sampling: pick seed points ("p"), then pull in all of
        # each seed's L2 neighbors ("pn") so cached nodes carry full neighborhoods.
        # Initial seed count only — seeds plus neighbors will undershoot
        # l1_graph_node_count, so we top up below.
        l1_p_count = int(l1_graph_node_count/l2_average_neibors)
        # Draw the seed points.
        l1_p_id = np.random.choice(l2_graph.n, l1_p_count, replace=False) #seed ids in L2
        # Keep the seeds in a set: membership is tested on every top-up
        # iteration below, and a list would make that loop quadratic.
        l1_p_id_set = set(int(x) for x in l1_p_id)
        print("l1 simple node count:", len(l1_p_id_set))
        # The running L1 node set starts with the seeds...
        l1_id_set = set(l1_p_id_set)
        # ...and absorbs every L2 neighbor of every seed.
        for p_id in l1_p_id:
            for pn_id in l2_graph.graph[p_id]:
                l1_id_set.add(int(pn_id))

        # Top up until the target size is reached.
        while(len(l1_id_set) < l1_graph_node_count):
            next_node_id = int(np.random.choice(l2_graph.n, 1)[0])
            # Only reject ids already used as seeds; a node merely present in the
            # final set can still contribute new neighbors.
            if next_node_id in l1_p_id_set:
                continue

            # Add the node itself and all of its L2 neighbors.
            l1_id_set.add(next_node_id)
            for pn_id in l2_graph.graph[next_node_id]:
                l1_id_set.add(int(pn_id))

        # -1 is the padding value in neighbor lists, not a real node id.
        l1_id_set.discard(-1)

        # Sampling result.
        l1_id_list = list(l1_id_set) #list form for ordered processing below
        print("l1_id_list len:",len(l1_id_list), "True simple ratio:", len(l1_id_list)/l2_graph.n)

        # Record which sampled nodes already have (enough of) their neighborhood cached.
        l1_cached_list = []
        for node_id in l1_id_list:
            # Deduplicate the neighbor list and drop the -1 padding entries.
            node_neighbor_ids = set(int(x) for x in l2_graph.graph[node_id])
            node_neighbor_ids.discard(-1)
            neighbor_count = len(node_neighbor_ids)
            # How many of those neighbors made it into the L1 set.
            cached_neighbor_count = len(node_neighbor_ids & l1_id_set)
            # A node counts as fully cached once the cached fraction reaches the threshold.
            if cached_neighbor_count >= neighbor_count * l1_ncs_threshold:
                l1_cached_list.append(node_id)

        l1_cached_list = np.array(l1_cached_list).astype(np.uint32).reshape(-1,1)
        write_bin(l1_cachelist_path, l1_cached_list)
        # Final sample as a numpy array.
        l1_graph_node_id = np.array(l1_id_list)
        print("l1_graph_node_id.shape:", l1_graph_node_id.shape)

    else:
        # Fail fast on an unknown method instead of pausing on input() and then
        # crashing later with l1_graph_node_id still None.
        raise ValueError("l1_simpling_method must be 'random' or 'ncs', got %r" % (l1_simpling_method,))

    #Refresh the node count: "ncs" sampling changes the number of sampled nodes.
    l1_graph_node_count = l1_graph_node_id.shape[0]
    #Vectors of the sampled nodes.
    # NOTE(review): width comes from dataset.shape[1] but rows/dtype from
    # l2_dataset — presumably both share the same dimensionality; confirm.
    l1_graph_node = np.zeros((l1_graph_node_count, dataset.shape[1]), dtype=l2_dataset.dtype) #vectors of the sampled nodes
    # PQ codes of the sampled nodes; kept separately because the graph cannot be
    # built directly from PQ codes.
    # NOTE(review): pq_codes is (re)assigned above only when graph_type == "vamana";
    # confirm it is defined on other paths before this runs.
    l1_graph_node_pq = np.zeros((l1_graph_node_count, pq_codes.shape[1]), dtype=pq_codes.dtype) #PQ codes; not used to build the graph directly
    #Copy each sampled node's vector (and optionally its PQ code) into place.
    for i in range(l1_graph_node_count):
        l1_graph_node[i] = l2_dataset[l1_graph_node_id[i]]
        if(l1_graph_node_type == 2):
            l1_graph_node_pq[i] = pq_codes[l1_graph_node_id[i]] # optionally keep the PQ-encoded vectors
    #Normally the vectors are appended inside the graph file, so no separate write.
    # write_bin(l1_graph_vector_path, l1_graph_node)

    print("l1_graph_node.shape:", l1_graph_node.shape)
    # Compute ground truth for the L1 layer (evaluation runs only).
    if evaluation:
        compute_gt_multithread(l1_graph_node, queries, l1_gt_path, gt_k)

    #Graph-building section=========================================================================================
    #Build the L1 proximity graph over the sampled nodes.
    l1_graph = Graph(l1_graph_node.shape[0], l1_graph_R)
    if graph_type == "nsg":
        print("Build NSG graph...")
        l1_NSG_index = faiss.IndexNSGFlat(l1_graph_node.shape[1], l1_graph_R)  # NSG index with degree l1_graph_R
        l1_NSG_index.add(l1_graph_node)
        # Persist the faiss index so it can be re-read as our Graph structure.
        temp_graph_path = "l1_nsg_index.faiss" #temporary file, only used for format conversion
        faiss.write_index(l1_NSG_index, temp_graph_path)
        # Load the graph back from the faiss file.
        l1_graph.load_from_faiss(temp_graph_path)
    elif graph_type == "vamana":
        print("Build Vamana graph...")

        #Dump the sampled vectors so the external DiskANN builder can consume them.
        l1_raw_vectors_path = "l1_raw_vectors_temp.bin"
        write_bin(l1_raw_vectors_path, l1_graph_node)

        l1_diskann_output_path = diskann_output_path+"_l1/"
        if not os.path.exists(l1_diskann_output_path):
            os.makedirs(l1_diskann_output_path)
        #The L1 build has no memory constraint, so give it a generous DRAM budget.
        diskann_search_dram_budget = 100
        #Assemble the DiskANN build command line.
        # NOTE(review): these paths come from the build config and are passed to a
        # shell string; keep the config trusted, or switch to subprocess.run([...]).
        diskann_build_cmd  = diskann_executor_path
        diskann_build_cmd += " --data_type"
        if(l1_graph_node.dtype == np.float32):
            diskann_build_cmd += " float"
        elif(l1_graph_node.dtype == np.uint8):
            diskann_build_cmd += " uint8"
        else:
            raise Exception("Unsupported data type!",l1_graph_node.dtype)

        diskann_build_cmd += " --dist_fn l2" #other distance metrics not handled yet
        diskann_build_cmd += " --data_path " + l1_raw_vectors_path
        diskann_build_cmd += " --index_path_prefix " + l1_diskann_output_path
        diskann_build_cmd += " -R %d"%l1_graph_R
        diskann_build_cmd += " -L%d"%diskann_build_L
        diskann_build_cmd += " -B %f"%diskann_search_dram_budget
        diskann_build_cmd += " -M %d"%diskann_build_dram_limit

        #Run the builder and fail loudly on a non-zero exit status, instead of
        #silently continuing and later loading a missing/stale index file.
        print("[ANNLite Builder]Build DiskANN by cmd: ",diskann_build_cmd)
        build_status = os.system(diskann_build_cmd)
        if build_status != 0:
            raise RuntimeError("DiskANN build failed with exit status %d" % build_status)
        print("[ANNLite Builder]Build DiskANN Done")

        #Convert the on-disk DiskANN index into our Graph structure.
        l1_graph.load_from_diskann(l1_diskann_output_path+"_disk.index", l1_graph_node.dtype)
    else:
        # Fail fast on an unknown graph type instead of pausing on input().
        raise ValueError("graph_type must be 'nsg' or 'vamana', got %r" % (graph_type,))

    #Write the L1 graph. It is memory-resident at query time, so the node payload
    #is appended directly after the graph structure.
    if(l1_graph_node_type==1):
        l1_graph.update_node_type(l1_graph_node, l1_graph_node_type) #attach the raw vectors before writing the graph
    elif(l1_graph_node_type==2):
        l1_graph.update_node_type(l1_graph_node_pq, l1_graph_node_type) #attach the PQ vectors before writing the graph
    # NOTE(review): no branch for other node types — the graph would be saved
    # without an attached payload; confirm that is intended.
    l1_graph.save_as_bin(l1_graph_path, alignment_size)

    #Convert l1_graph_node_id to an int32 column vector and persist it.
    #This is the mapping from L1 node ids to their L2 counterparts.
    l1_graph_node_id = l1_graph_node_id.astype(np.int32).reshape(-1,1)
    write_bin(l1_l2_graph_node_mapping_path, l1_graph_node_id)

#Persist the build configuration as JSON for the searcher to read back.
import json  # NOTE(review): json is already imported at the top of the file; this re-import is redundant but harmless

#Dataset-related settings were moved to the user config file.
# NOTE(review): dataset_path/query_path/gt_path are still emitted below — the
# comment above appears stale; confirm which is intended.
# All *_path values are stored relative to output_folder (prefix stripped).
# NOTE(review): the keys "pq_centorid_path" and "l2_average_neibors" are
# misspelled, but readers of this config expect exactly these keys — do not
# rename them here without updating every consumer.
config = {
    "index_mode": build_mode_index,
    "dataset_path": dataset_path,
    "query_path": query_path,
    "gt_path": gt_path,
    "build_mode":build_mode,
    "graph_type":graph_type,
    "cluster_center_path": cluster_center_path.replace(output_folder, ""),
    "offset_list_path": offset_list_path.replace(output_folder, ""),
    "last_layer_path": last_layer_path.replace(output_folder, ""),
    "pq_vector_path": pq_vector_path.replace(output_folder, ""),
    "pq_table_path": pq_table_path.replace(output_folder, ""),
    "pq_centorid_path": pq_centorid_path.replace(output_folder, ""),
    "l2_graph_root_path": l2_graph_root_path.replace(output_folder, ""),
    "cluster_count": cluster_count,
    "train_ratio": train_ratio,
    "k": k,
    "nprobe": nprobe,
    "l2_graph_R": l2_graph_R,
    "vector_count": dataset.shape[0],
    "dim": dataset.shape[1],
    "feature_type": str(dataset.dtype),
    "pq_bucket": pq_bucket,
    "pq_bit": pq_bit,
    "alignment_size": alignment_size,
    "l1_graph_path": l1_graph_path.replace(output_folder, ""),
    "l1_l2_graph_node_mapping_path": l1_l2_graph_node_mapping_path.replace(output_folder, ""),
    "l1_graph_R": l1_graph_R,
    "l1_graph_node_count": l1_graph_node_count,
    "l1_graph_node_type": l1_graph_node_type,
    "l2_graph_node_type": l2_graph_node_type,
    "l1_gt_path":  l1_gt_path.replace(output_folder, ""),
    "l2_gt_path": l2_gt_path.replace(output_folder, ""),
    "l1_cachelist_path": l1_cachelist_path.replace(output_folder, ""),
    "l2_average_neibors": l2_average_neibors
}

with open(config_path, "w") as f:
    json.dump(config, f, indent=4)

#Remove any previous "buildtime" marker files in output_folder.
import os
for marker_name in os.listdir(output_folder):
    if marker_name.startswith("buildtime"):
        os.remove(output_folder+marker_name)
#Create an empty file named after the current date/time to mark when this
#index was built.
# NOTE(review): the "%H:%M" part puts a ':' into the filename, which is
# invalid on Windows filesystems — fine on POSIX.
import datetime
now = datetime.datetime.now()
# Context manager replaces the bare open()/close() pair, guaranteeing the
# handle is closed even if the write path raises.
with open(output_folder+"buildtime-"+now.strftime("%Y-%m-%d-%H:%M"), 'w'):
    pass

#Append this index's output folder to the global index registry.
with open("indexpath.csv","a") as f:
    f.write(output_folder+",\n")

print(f"Build Finish! Output to {output_folder}")
#Write the output path to a temp file to pass it to batch_build.