#pragma once
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <thread>
#include <vector>

#include <sys/stat.h>

#include "minlist.h"
#include "bloomfilter.h"
#include "io_engine.h"

#define CTYPE float //type of cluster centroids, normally float
#define PQTYPE uint8_t //type of PQ-compressed vectors, normally uint8_t
#define PQ_DECODE_TYPE float //type of a decoded PQ vector, normally float
#define ISPRINT 0
#define INF 0xFFFFFF
#define MAX_BEAMSEARCH_WIDTH 32 //maximum width of beam search

#include "annlite_utils.h"
#define DISTANCE l2_distance_general //l2_distance_general has multiple implementations and can handle computations on different data types

using namespace std;
using TimePoint = std::chrono::time_point<std::chrono::high_resolution_clock>;

//T代表存储的数据的类型
template <class T>
class Bin{
private:
    int n; //包含多少行
    int dim; //每行数据的维数，类型为T
    int load_mode; //0表示全量加载;1表示已分配全量内存但未加载文件;2表示按需加载，data已分配一行的空间
    T* data=NULL;

    int thread_count; //表示这个类包含多少个线程的资源，目前仅针对按需加载（即用init_with_file初始化）的场景。
    std::vector<std::ifstream> fp_array;   //创建文件句柄数组，为每个线程分配一个文件句柄
    std::vector<T*> buf_array;  //为每个线程分配一个读buffer

public:
    Bin(int in_n=0, int in_dim=0){ //初始化函数基本没用
        if(in_n!=0 && in_dim!=0){ //如果初始化时就指定了大小
            n = in_n;
            dim = in_dim;
            data = new T[n*dim];
            load_mode = 1; //如果指定了大小表示是全量加载
        }
        load_mode = -1;
    }

    bool is_init(){
        if(load_mode == -1)
            return false;
        else
            return true;
    }

    int get_n(){
        return n;
    }

    int get_dim(){
        return dim;
    }

    //读取line，在并发场景下为了避免冲突可以使用用户传入的buf存储读取的数据（主要是针对load_mode 2）
    T* get_line(int line_number, int thread_id=-1){
        if(data==NULL){
            std::cout << "[Error]Bin::getline data is null" << std::endl;
            exit(1);
        }
        if(load_mode <= 1)
            return data + line_number*dim;
        else if(load_mode == 2){
            if(thread_id == -1){
                std::cout << "[Error]Bin::getline thread_id not given" << std::endl;
                exit(1);
            }
            if(thread_id >= thread_count){
                std::cout << "[Error]Bin::getline thread_id out of range" << std::endl;
                exit(1);
            }

            //按需加载
            ifstream& fp = fp_array[thread_id];
            if(!fp.is_open()){
                std::cout << "[Error]Bin::getline file is not open" << std::endl;
                exit(1);
            }
            fp.seekg(8 + line_number*dim*sizeof(T), ios::beg);
            //读取数据到buf中
            T* buf = buf_array[thread_id];
            fp.read((char*)buf, sizeof(T)*dim);
            return buf;
        }
        else{
            std::cout << "[Error]Bin::getline load_mode error" << std::endl;
            exit(1);
        }
    }

    //获取某一行的值的累加和，主要是用于统计结果
    T get_line_sum(int line_number){
        if(data==NULL){
            std::cout << "[Error]Bin::get_line_sum data is null" << std::endl;
            exit(1);
        }
        if(load_mode <= 1){
            T* line = data + line_number*dim;
            T sum = 0;
            for(int i=0; i<dim; i++){
                sum += line[i];
            }
            return sum;
        }
        else{
            std::cout << "[Error]Bin::get_line_sum load_mode error" << std::endl;
            exit(1);
        }
    }

    //一口气全部加载进内存，对应load_mode=0
    int load_fromfile(string filename){
        load_mode = 0;
        //以二进制形式打开并且采用directIO模式，并读取前4个字节
        ifstream fp(filename, ios::binary);
        if(!fp.is_open()){
            std::cerr << "[Bin::load_fromfile]无法打开文件" << filename << std::endl;
            return -1;
        }
        fp.read((char*)(&n), sizeof(int));
        fp.read((char*)(&dim), sizeof(int));

        cout << "Load BIN file: " << filename << endl;
        cout << "|-n: " << n << " dim: " << dim << endl;

        data = new T[n*dim];
        fp.read((char*)data, sizeof(T)*n*dim);

        fp.close();

        return 0;
    }

    //不依附于某个文件，用于存储结果等，对应load_mode=1
    int init_without_file(int input_n, int input_dim){
        load_mode = 1;
        n = input_n;
        dim = input_dim;

        cout << "Init BIN with " << input_n << " rows and " << input_dim << " columns" << endl;

        data = new T[n*dim]();
        memset(data, 0, sizeof(T)*n*dim);

        return 0;
    }

    //使用Bin文件对其进行初始化
    //如果load_all_data为1，则表示全量加载（默认，为与load_fromfile接口兼容），使用load_fromfile对其进行初始化，对应load_mode=0
    //如果load_all_data为0，则按照按需加载模式进行初始化，对应load_mode=2,不加载实际数据
    int init_with_file(string filename, int thread_count=1, int load_all_data=1){
        this->thread_count = thread_count;

        if(load_all_data == 1){
            return load_fromfile(filename);
        }
        else if(load_all_data != 0){
            std::cout << "[Bin::init_with_file]load_all_data must be 0 or 1" << std::endl;
            exit(1);
        }

        load_mode = 2; //使用init_with_file则表示按需加载
        ifstream fp(filename, ios::binary);
        if(!fp.is_open()){
            std::cout << "[Bin::init_with_file]无法打开文件" << filename << std::endl;
            return -1;
        }
        fp.read((char*)(&n), sizeof(int));
        fp.read((char*)(&dim), sizeof(int));

        cout << "Init BIN file without data loading: " << filename << endl;
        cout << "|-n: " << n << " dim: " << dim << " threads:" << this->thread_count << endl;

        data = new T[dim]; //只初始化一行
        fp.close();

        //为每个线程分配文件句柄和读buffer
        for(int i=0; i<thread_count; i++){
            ifstream fp(filename, ios::binary);
            if(!fp.is_open()){
                std::cout << "[Bin::init_with_file]无法打开文件" << filename << std::endl;
                exit(1);
            }
            fp_array.push_back(std::move(fp));

            T* data = new T[dim];
            buf_array.push_back(data);
        }

        return 0;
    }

    //如果这个Bin存的是向量，可以用这个函数来遍历每个聚类中心以找到距离
    //相当于一个暴力向量检索方法
    //result_list是用户传入的，增强灵活性，以及避免多线程问题
    int scan_topk(T* query, MinList& result_list, int thread_id=0){
        if(!is_init()){
            std::cout << "[ERROR!]Bin's data is null" << std::endl;
            exit(1);
        }
        if(result_list.get_count() != 0){
            std::cout << "[ERROR!]result_list is not empty" << std::endl;
            exit(1);
        }

        MinListEntry temp;
        for(int id=0; id<n; id++){
            T* line = get_line(id, thread_id);
            float distance = DISTANCE(query, line, dim);
            temp.id = id;
            temp.distance = distance;
            temp.flag = FLAG_VISITED;
            result_list.insert(&temp);
        }

        return 0;
    }

    void print(int row=-1, int col=-1, int force_print=0){ //=-1则表示全部打印
        if(this->data==NULL){
            std::cout << "[ERROR!]Bin's data is null" << std::endl;
            return;
        }
        if(ISPRINT==0 && force_print==0)
            return;
        if(row==-1)
            row = n;
        if(col==-1)
            col = dim;
        if(row > n)
            row = n;
        if(col > dim)
            col = dim;
        for(int i=0; i<row; i++){
            std::cout << "|-row: " << i << ": ";
            for(int j=0; j<col; j++)
                //固定数字宽度为2
                std::cout  << std::setw(3) << data[i*dim+j] << " ";
            std::cout << std::endl;
        }
    }

    void print_line(int line_number, int limit=10){
        if(this->data==NULL){
            std::cout << "[ERROR!]Bin's data is null" << std::endl;
            return;
        }

        T* line = get_line(line_number);

        if(limit>dim)
            limit = dim;
        std::cout << "[print_line]line " << line_number << ": ";
        for(int j=0; j<limit; j++)
            //固定数字宽度为2
            std::cout  << std::setw(3)<< line[j] << " ";
        std::cout << std::endl;
    }

    //用于设置值，主要是把minlist的结果写进去到这个Bin里面，用于Bin存储结果的情况
    //将minlist的值写入到第line_number行
    void set_line_minlist(int line_number, MinList& minlist){
        //首先检查维数是否相同
        if(dim!=minlist.get_size()){
            std::cerr << "[Bin::set_line_minlist]dim not match" << std::endl;
            return;
        }

        T* line = get_line(line_number);
        for(int i=0; i<dim; i++){
            int value = minlist.get_id(i);
            line[i] = value;
        }
    }

    //针对Bin中存储的是映射表的情况
    //例如当L1层完成检索之后，需要将L1层的id映射为第二层
    void minlist_mapping(MinList& minlist){
        if(data==NULL){
            std::cerr << "[ERROR!]Bin::mapping_to_minlist data is null" << std::endl;
            exit(1);
        }
        for(int i=0; i<minlist.get_count(); i++){
            //映射id
            int id = minlist.get_id(i);
            int new_id = get_line(id)[0];
            minlist.set_id(i, new_id);
            //映射parent_id
            #ifdef FAT_MINLIST_ENTRY
            int parent_id = minlist.get_parent_id(i);
            int new_parent_id = get_line(parent_id)[0];
            minlist.set_parent_id(i, new_parent_id);
            #endif
        }
    }

    void set_value(int line_number, int index, T value){
        if(data==NULL){
            std::cerr << "[ERROR!]Bin's data is null" << std::endl;
            return;
        }
        T* line = get_line(line_number);
        line[index] = value;
    }

    ~Bin(){
        if(load_mode == 2){ //按需加载模式下，释放每个线程的buf
            for(int i=0; i<thread_count; i++){
                if(fp_array[i].is_open())
                    fp_array[i].close();
                if(buf_array[i]!=NULL)
                    delete[] buf_array[i];
            }
        }
        if(data!=NULL)
            delete[] data;
    }
};

//Performance analyser: collects recall, latency, IO counts etc. for a query run,
//prints a summary and optionally dumps a per-query CSV for offline analysis.
class Performance_Analyser{
public:
    int n;
    int output_to_file;
    string output_path;
    string output_path_detail;

    int record_global_on; //record global wall-clock time (low overhead)
    int record_detail_on; //record per-request latency / IO time / compute time (some overhead)

    //global timing
    TimePoint start_time;
    TimePoint end_time;
    Bin<double> times;
    //recall
    float recall_rate; //overall average recall
    Bin<float> recall_rates; //per-request recall
    //IO counters
    Bin<int> io_count; //per-request IO count per layer; +1 for every IO issued
    Bin<int> visited_node_count; //per-request visited-node count per layer, i.e. number of minlist.insert calls
    //L1/L2 overlap ratio
    Bin<float> overlap_ratio; 
    //miscellaneous
    Bin<float> trivial; //unnamed scratch metrics so users can record extra values easily

    Performance_Analyser(){
        //BUGFIX: n, output_to_file and recall_rate were left uninitialized; print_results
        //reads all three, so give them benign defaults in case init() was never called.
        n = 0;
        output_to_file = 0;
        recall_rate = 0.0f;
        record_global_on = 1; 
        record_detail_on = 1;
    }

    ~Performance_Analyser(){
    }

    //Allocate all metric tables for user_config["max_query"] queries.
    void init(json user_config, json dataset_config){
        n = user_config["max_query"];
        output_to_file = user_config["output_to_file"];
        output_path = user_config["output_path"];
        output_path_detail = user_config["output_path_detail"];

        recall_rates.init_without_file(n, 3); //recall per layer
        io_count.init_without_file(n, 3);
        visited_node_count.init_without_file(n, 3);
        times.init_without_file(n, 10); //0: total latency per request; 1,2,3: L1 total/compute/IO time; and so on per layer
        overlap_ratio.init_without_file(n, 1);
        trivial.init_without_file(n, 3); //extend as needed; current meaning: [0] IO count saved by L2; [1] L2 IO size (B); [2] L3 IO size (B)
    }

    //Record the recall of one request at one layer.
    void set_recall_rate(int qid, int layer, float rate){
        recall_rates.set_value(qid, layer, rate);
    }

    void add_io_count(int qid, int layer, int increment){
        int new_count = io_count.get_line(qid)[layer] + increment;
        io_count.set_value(qid, layer, new_count);
    }

    void add_visited_count(int qid, int layer, int increment){
        int new_count = visited_node_count.get_line(qid)[layer] + increment;
        visited_node_count.set_value(qid, layer, new_count);
    }

    //Accumulate time (ms) into the given column of the times table (see init for the layout).
    void add_time(int qid, int offset, double increment){
        double new_time = times.get_line(qid)[offset] + increment;
        times.set_value(qid, offset, new_time);
    }

    void set_overlap_ratio(int qid, int layer, float ratio){
        overlap_ratio.set_value(qid, layer, ratio);
    }

    void set_trivial_value(int qid, int index, float value){
        trivial.set_value(qid, index, value);
    }

    void add_trivial_value(int qid, int index, float increment){
        float new_value = trivial.get_line(qid)[index] + increment;
        trivial.set_value(qid, index, new_value);
    }

    //Mark the start of the whole run.
    void mark_start(){
        start_time = std::chrono::high_resolution_clock::now();
    }

    //Mark the end of the whole run.
    void mark_end(){
        end_time = std::chrono::high_resolution_clock::now();
    }

    //Grab a timestamp; lets each thread do its own fine-grained timing.
    TimePoint get_time(){
        return std::chrono::high_resolution_clock::now();
    }
    
    //Elapsed time between two timestamps, in milliseconds.
    double elapsed_time(TimePoint start, TimePoint end){
        auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
        return static_cast<double>(duration.count()) / 1000.0; // convert to milliseconds
    }

    //Print the summary to stdout (and optionally to output_path), then dump the
    //per-query detail CSV to output_path_detail and run the analysis script.
    //NOTE(review): averages divide by n, so this assumes init() ran with n > 0.
    void print_results(){
        std::ostringstream output_stream; //string stream collecting the report

        //aggregate the metrics
        //total wall-clock time
        double total_time = elapsed_time(start_time, end_time);
        //total and average IO / visited-node counts
        int total_io_count = 0;
        int total_visited_node_count = 0;
        for(int i=0; i<n; i++){
            total_io_count += io_count.get_line_sum(i);
            total_visited_node_count += visited_node_count.get_line_sum(i);
        }

        output_stream << "=====================QuerySet Performance:===================" << endl;
        output_stream << "Total query: " << n << endl;
        output_stream << "Recall Rate: " << recall_rate << endl;
        output_stream << "Total Time: " << total_time << " ms, Average Time: " << (float)total_time/n << " ms" << endl;
        output_stream << "Total IO Count: " << total_io_count << ", Average IO Count: " << (float)total_io_count/n << endl;
        output_stream << "Total Visited Node: " << total_visited_node_count << ", Average IO Volumn: " << (float)total_visited_node_count/n << endl;

        if(output_to_file){
            output_stream << "Output to file: " << output_path << endl;
        }

        output_stream << "=============================================================" << endl;

        std::cout << output_stream.str();
        if(output_to_file){
            std::ofstream output_file(output_path);
            if (output_file.is_open()) {
                output_file << output_stream.str();
                output_file.close();
            } else {
                std::cerr << "Failed to open output file." << std::endl;
            }
        }

        //build the detailed per-query results; these are only written to file
        if(output_to_file){
            //reset the stream
            output_stream.str("");
            //CSV header
            output_stream <<    "query_id, ";
            output_stream <<    "recall_rate(L1), recall_rate(L2), recall_rate(L3), ";
            output_stream <<    "io_count(L1), io_count(L2), io_count(L3), ";
            output_stream <<    "visited_node_count(L1), visited_node_count(L2), visited_node_count(L3), ";
            output_stream <<    "total_time(ms), ";
            output_stream <<    "L1 total, L1 compute, L1 load, ";
            output_stream <<    "L2 total, L2 compute, L2 load, ";
            output_stream <<    "L3 total, L3 compute, L3 load, "; 
            output_stream <<    "L1/L2 overlap ratio" ;
            for(int i=0; i<trivial.get_dim(); i++){
                output_stream <<  ", trivial-" << i ;
            }
            output_stream << endl;
            //one CSV row per query
            for(int i=0; i<n; i++){
                output_stream << i;
                for(int j=0; j<recall_rates.get_dim(); j++){
                    output_stream << ", " << recall_rates.get_line(i)[j];
                }
                for(int j=0; j<io_count.get_dim(); j++){
                    output_stream << ", " << io_count.get_line(i)[j];
                }
                for(int j=0; j<visited_node_count.get_dim(); j++){
                    output_stream << ", " << visited_node_count.get_line(i)[j];
                }
                for(int j=0; j<times.get_dim(); j++){
                    output_stream << ", " << times.get_line(i)[j];
                }
                for(int j=0; j<overlap_ratio.get_dim(); j++){
                    output_stream << ", " << overlap_ratio.get_line(i)[j];
                }
                for(int j=0; j<trivial.get_dim(); j++){
                    output_stream << ", " << trivial.get_line(i)[j];
                }
                output_stream << endl;
            }

            //write the detail file
            std::ofstream output_file(output_path_detail);
            if (output_file.is_open()) {
                output_file << output_stream.str();
                output_file.close();
            } else {
                std::cerr << "Failed to open output file." << std::endl;
            }
        }

        if(output_to_file){
            //run "python result_analyser.py" for post-processing
            int i = system("python3 result_analyser.py");
            if(i!=0){
                std::cerr << "Failed to execute python script." << std::endl;
            }
        }
    }
};

//Query set: stores the queries, their results and the ground truth, and computes recall.
//T is the element type of the dataset / query vectors.
template <class T>
class QuerySet{
private:
    //query:
    Bin<int> gts;
    string query_path;
    string gt_path;

public:
    Bin<T> querys;
    Bin<int> results;
    Performance_Analyser perf; //holds all run metrics (recall, per-layer latency, ...) and doubles as the timer

    //global
    int n; //actual number of queries in the dataset
    int dim;
    int recall_k;
    int real_k;
    int query_count; //number of queries requested in the config file
    int last_layer_nprobe;

    //Load queries and ground truth. Paths from user_config take priority; if they are
    //missing (the json access throws) the paths from dataset_config are used instead.
    QuerySet(json user_config, json dataset_config){
        try
        {
            query_path = string(user_config["query_path"]);
            gt_path = string(user_config["gt_path"]);
            cout << "Load Query and GT file from user config" << endl;
        }
        catch(const std::exception& e)
        {
            query_path = string(dataset_config["query_path"]);
            gt_path = string(dataset_config["gt_path"]);
            cout << "Load Query and GT file from dataset config" << endl;
        }

        querys.init_with_file(query_path);
        querys.print(10,10);
        
        gts.init_with_file(gt_path);
        
        n = querys.get_n();
        dim = querys.get_dim();
        real_k = user_config["real_k"]; //true length of the candidate list
        recall_k = user_config["recall_k"]; //k used when computing recall

        results.init_without_file(n, real_k);
        results.print(10,10);

        query_count = user_config["max_query"];
        query_count = ann_min(n,query_count); //never run more queries than the dataset has
        
        last_layer_nprobe = user_config["last_layer_nprobe"];

        perf.init(user_config, dataset_config); //initialize the performance analyser
    }

    ~QuerySet(){
    }

    //Recall for one query when the result list is strictly sorted by true distance.
    //Single merge-style pass, O(recall_k).
    float compute_recall_line(int line_number){
        //fetch the result row and the ground-truth row
        int* result_line = results.get_line(line_number);
        int* gt_line = gts.get_line(line_number);

        int same = 0;
        int next_gt = 0;
        for(int i=0; i<recall_k; i++){ //for the i-th approximate neighbour in the result
            while(next_gt < recall_k){ 
                if(result_line[i] == gt_line[next_gt]){
                    same++;
                    next_gt++;
                    break;
                }
                next_gt++;
            }
            if(next_gt >= recall_k){
                break;
            }
        }
        
        return ((float)same)/recall_k;
    }

    //Order-insensitive recall, for when the result list is not guaranteed to be sorted.
    //E.g. with PQ-only search the PQ ordering may differ from the true ordering even
    //though the set is still a valid top-k. Checks membership pairwise, O(recall_k^2).
    float compute_recall_line_outorder(int line_number){
        //fetch the result row and the ground-truth row
        int* result_line = results.get_line(line_number);
        int* gt_line = gts.get_line(line_number);

        int same = 0;
        for(int i=0; i<recall_k; i++){
            for(int j=0; j<recall_k; j++){
                if(result_line[i] == gt_line[j]){
                    same++;
                    break;
                }
            }
        }
        
        return ((float)same)/recall_k;
    }

    //Average recall over all executed queries; also records each per-query value
    //in the performance analyser (layer index 2 = final result).
    float compute_recall(){
        float recalls = 0.0; //running sum of per-query recall

        for(int i=0; i<query_count; i++){
            float recall = compute_recall_line_outorder(i);
            perf.set_recall_rate(i, 2, recall); //final recall, stored under L3
            recalls += recall;
        }

        return recalls/query_count;
    }

    //Utility for external callers: compute recall of outer_minlist against a
    //caller-supplied ground-truth table. k is clamped to both list sizes.
    float compute_recall_by_outer_gts(int q_id, Bin<int>& outer_gts, MinList& outer_minlist, int input_k=INF){
        int temp_k = input_k; 
        temp_k = std::min(temp_k, outer_minlist.get_count()); 
        temp_k = std::min(temp_k, outer_gts.get_dim()); 
        if(temp_k <= 0){
            cout << "[compute_recall_by_outer_gts]Error: k value error: " << temp_k;
            cout << ". Maybe gt file not exist" << endl;
            exit(1); //BUGFIX: was exit(0), which reported success on an error path
        }
        //NOTE: the old second check (temp_k > outer_gts.get_dim()) was unreachable after
        //the std::min clamp above, so it has been removed.

        //ground-truth row for this query
        int* gt_line = outer_gts.get_line(q_id);
        //copy the result ids out of the minlist (vector instead of raw new[]/delete[] — leak-proof)
        std::vector<int> result_line(outer_minlist.get_count());
        outer_minlist.write_id_to_list(result_line.data());
        //order-insensitive overlap count
        int same = 0;
        for(int i=0; i<temp_k; i++){
            for(int j=0; j<temp_k; j++){
                if(result_line[i] == gt_line[j]){
                    same++;
                    break;
                }
            }
        }
        
        return ((float)same)/temp_k;
    }

    //Compute the final recall and print/dump all performance results.
    int output_results(){
        perf.recall_rate = compute_recall();
        perf.print_results();
        return 0;
    }
};

//Reads cluster vectors from the bottom layer on disk; the on-disk layout of each
//entry is id followed by the vector values.
//T is the element type of the stored vectors.
template <class T>
class LastLayerVectors{
private:
    int dim; //dimensionality of the vectors
    int element_size; //bytes occupied by one entry (id + vector), used for offset math
    int nprobe; //used to pre-size each IOManager

    int thread_count;
    std::vector<int> fd_array;   //one file descriptor per thread
    std::vector<IOUringManager_ASYNC> io_manager_array; //one IOUringManager per thread
    
    string vector_path;
    string offset_list_path;

    Bin<int> L3_offset_list; //offset table (in entries) used to locate each cluster's vectors

public:
    //Load the offset table and open one O_DIRECT descriptor plus one io_uring manager per thread.
    int init(json user_config, json dataset_config){
        thread_count = user_config["thread_count"];
        dim = dataset_config["dim"];
        nprobe = user_config["last_layer_nprobe"];
        element_size = sizeof(int)+dim*sizeof(T); //bytes per entry: 4-byte id + the vector

        vector_path = string(user_config["index_root_path"])+string(dataset_config["last_layer_path"]);
        offset_list_path = string(user_config["index_root_path"])+string(dataset_config["offset_list_path"]);

        //load the offset table fully into memory and open the per-thread descriptors
        L3_offset_list.init_with_file(offset_list_path); //full load
        for(int i=0; i<thread_count; i++){
            int fd = open(vector_path.c_str(), O_RDONLY | O_DIRECT);
            if (fd == -1) {
                std::cerr << "Failed to open file: " << vector_path << std::endl;
                exit(1);
            }
            fd_array.push_back(fd);
        }
        //construct the managers in place; reserve first so the fds stay paired with their managers
        io_manager_array.reserve(thread_count);
        for (int t = 0; t < thread_count; ++t) {
            io_manager_array.emplace_back(fd_array[t], nprobe);
        }

        return 0;
    }

    //Read one cluster's vectors by cluster id; buf_entry_count is set to the number of
    //entries in the cluster. aligned_buf is caller-owned (allocated with get_aligned_buffer);
    //if it is too small it is released and reallocated, and aligned_buf/aligned_buf_length
    //are updated in place. Returns a pointer to the first entry inside the aligned buffer.
    char* load_cluster(int c_id, char*& aligned_buf, int& aligned_buf_length, int& buf_entry_count, int thread_id=0){
        //look up the cluster's start offset and length (both counted in entries)
        int offset = L3_offset_list.get_line(c_id)[0];
        int length = L3_offset_list.get_line(c_id+1)[0] - offset;
        //byte offsets are computed in long long: offset*element_size overflows int on large files
        long long byte_offset = (long long)offset*element_size;
        int aligned_length = aligned_size((long long)length*element_size); //minimum aligned read size for this cluster
        buf_entry_count = length; //output parameter

        //grow the caller's buffer if needed
        if(aligned_buf_length < aligned_length){
            //BUGFIX: the buffer comes from get_aligned_buffer (malloc family — see the free()
            //calls in retrival_topk_batch), so it must be released with free(), not delete[].
            free(aligned_buf);
            aligned_buf_length = aligned_size(1.2*aligned_length); //over-allocate a little to avoid repeated reallocation
            aligned_buf = (char*)get_aligned_buffer(aligned_buf_length); 
        }

        //aligned read: seek to the aligned start and read the aligned span
        long long aligned_offset = aligned_offset_in_file(byte_offset);
        lseek(fd_array[thread_id], (off_t)aligned_offset, SEEK_SET);
        int ret = read(fd_array[thread_id], aligned_buf, aligned_length);
        if(ret == -1){
            std::cerr << "load_cluster Failed to read file" << endl;
            exit(1);
        }

        return aligned_buf+aligned_offset_in_buf(byte_offset); //pointer to the first entry inside the buffer
    }

    //Decode one entry of a loaded cluster buffer into its id and a pointer to the vector data.
    int get_vector(char* buf, int index, int& id, T*& vector_buf){
        char* head = buf + index*element_size;

        id = *(int*)head;
        vector_buf = (T*)(head + sizeof(int));

        return 0;
    }

    //Scan a loaded cluster buffer and push every entry's distance to query into result_list.
    int update_minlist_by_buf(T* query, MinList& result_list, char* buf, int buf_entry_count){
        MinListEntry temp;
        for(int i=0; i<buf_entry_count; i++){ //for every entry in the buffer
            int id;
            T* vector_buf;
            //decode the entry
            get_vector(buf, i, id, vector_buf);
            //distance to the query
            float distance = DISTANCE(query, vector_buf, dim);
            
            temp.id = id;
            temp.distance = distance;
            result_list.insert(&temp);
        }
        return 0;
    }

    //User entry point: given a list of cluster ids and a query vector, fetch each cluster
    //synchronously and maintain the top-k in result_list.
    //perf is the performance analyser, q_id the query id, layer the current layer index.
    int retrival_topk(T* query, MinList& cid_list, MinList& result_list, Performance_Analyser& perf, int q_id, int layer,int thread_id=0){
        int aligned_buf_length = aligned_size(1500*element_size); //pre-allocate room for 1500 entries
        char* aligned_buf = (char*)get_aligned_buffer(aligned_buf_length);
        for(int i=0; i<cid_list.get_count(); i++){ //for every cluster
            int c_id = cid_list.get_id(i); //cluster centroid id
            auto st_load_cluster = perf.get_time();
            int buf_entry_count=0; //number of entries in the current cluster
            char* buf = load_cluster(c_id, aligned_buf, aligned_buf_length, buf_entry_count, thread_id);
            auto et_load_cluster = perf.get_time();
            perf.add_time(q_id, 1+(layer*3+2), perf.elapsed_time(st_load_cluster, et_load_cluster));
            perf.add_io_count(q_id, layer, 1); //one more IO
            perf.add_visited_count(q_id, layer, buf_entry_count); //vectors read

            auto st_compute_cluster = perf.get_time();
            update_minlist_by_buf(query, result_list, buf, buf_entry_count); //merge into the top-k
            auto et_compute_cluster = perf.get_time();
            perf.add_time(q_id, 1+(layer*3+1), perf.elapsed_time(st_compute_cluster, et_compute_cluster));
        }
        //BUGFIX: the buffer comes from get_aligned_buffer (malloc family — see the free()
        //calls in retrival_topk_batch), so release it with free(), not delete[].
        free(aligned_buf);
        return 0;
    }

    //Batched variant that reads all requested clusters through io_uring.
    int retrival_topk_batch(T* query, MinList& cid_list, MinList& result_list, Performance_Analyser& perf, int q_id, int layer,int thread_id=0){
        //1. build the offset/length arrays and allocate one aligned buffer per cluster
        vector<long long> offset_list;
        vector<long long> length_list;
        vector<long long> aligned_offset_list;
        vector<long long> aligned_length_list;
        vector<void*> buf_list;
        vector<int> ids;
        for(int i=0; i<cid_list.get_count(); i++){ //for every cluster
            int c_id = cid_list.get_id(i); //cluster centroid id
            long long offset = L3_offset_list.get_line(c_id)[0];
            long long length = L3_offset_list.get_line(c_id+1)[0] - offset;
            offset_list.push_back(offset*element_size); //byte offset
            length_list.push_back(length); //number of entries

            aligned_offset_list.push_back(aligned_offset_in_file(offset*element_size)); //aligned byte offset
            int aligned_io_size = aligned_size(length*element_size, offset_list[i]);
            aligned_length_list.push_back(aligned_io_size); //aligned byte length
            buf_list.push_back(get_aligned_buffer(length*element_size, offset_list[i])); //allocate the read buffer

            perf.add_io_count(q_id, layer, 1); //one more IO
            perf.add_visited_count(q_id, layer, length); //vectors read

            ids.push_back(i);
            perf.add_trivial_value(q_id, 2, aligned_io_size);
        }

        //2. submit the IO requests
        IOUringManager_ASYNC& manager = io_manager_array[thread_id];
        if (!manager.submitRequests(buf_list, aligned_offset_list, aligned_length_list, ids, cid_list.get_count())) {
            std::cout << "LastLayerVectors::retrival_topk_batch Failed to submit IO requests." << std::endl;
            exit(0);
        }

        //3. poll for completions, processing each cluster as soon as its read finishes
        int length=0;
        int complete_request = 0;
        auto st = perf.get_time();
        while (1) {
            std::vector<int> completedIndices;
            int complete_index_count = manager.getCompletedResultIds(completedIndices);
            for(int i=0; i<complete_index_count; i++){
                int complete_index = completedIndices[i];
                char* data = (char*)(buf_list[complete_index]);
                data = data + aligned_offset_in_buf(offset_list[complete_index]); //first entry inside the aligned buffer
                length = length_list[complete_index];
                auto st_compute_cluster = perf.get_time();
                update_minlist_by_buf(query, result_list, data, length); //merge into the top-k
                auto et_compute_cluster = perf.get_time();
                perf.add_time(q_id, 1+(layer*3+1), perf.elapsed_time(st_compute_cluster, et_compute_cluster)); //compute time
            }
            complete_request += complete_index_count;
            if (complete_request >= cid_list.get_count()) {
                break; //all requests completed
            }
        }
        auto et = perf.get_time();
        perf.add_time(q_id, 1+(layer*3+2), perf.elapsed_time(st, et)); //IO (load) time
        
        //release the per-cluster buffers (get_aligned_buffer is malloc family)
        for(int i=0; i<cid_list.get_count(); i++){
            free(buf_list[i]);
        }
        return 0;
    }

    LastLayerVectors(){
    }

    ~LastLayerVectors(){
        //close every per-thread descriptor
        for(int i=0; i<thread_count; i++){
            if(fd_array[i]!=-1)
                close(fd_array[i]);
        }
    }
};

//PQ向量构建器，根据PQ压缩向量和PQ表构建出近似的原始向量
//T代表的是向量表里面的是什么类型的数据（通常与数据集的向量类型是一致的）
//正常情况下，T都是float，因为faiss输出的都是float类型的
//目前仅支持uint8类型的PQ向量
template <class T>
class PQConstructor{
private:
    int pq_bucket;
    int pq_bit;
    int dim; //保存原始向量的维数
    //预先计算降低解码开销
    int bucket_vector_count; //每个桶包含的截断向量的数量 #256
    int pq_vector_dim; //每个截断向量的维度 #8
    int bucket_size; //每个桶的占用空间，用于计算偏移量

    string pq_vector_path;
    string pq_table_path;
    string pq_centorid_path;
    Bin<PQTYPE> pq_vector;
    Bin<T> pq_table;
    Bin<T> pq_centorid; //PQ编码的质心，就是一个向量，PQ解码时需要加上
    int is_add_pq_centorid; //如果pq_centorid全0则不加了

public:
    void init(json user_config, json dataset_config){
        pq_bucket = dataset_config["pq_bucket"];
        pq_bit = dataset_config["pq_bit"];
        dim = dataset_config["dim"];

        bucket_vector_count = 1 << pq_bit;
        pq_vector_dim = dim / pq_bucket;
        bucket_size = bucket_vector_count * pq_vector_dim; 

        cout << "PQConstructor init with bucket_vector_count: " << bucket_vector_count << " pq_vector_dim: " << pq_vector_dim << " bucket_size: " << bucket_size << endl;

        pq_vector_path = string(user_config["index_root_path"])+string(dataset_config["pq_vector_path"]);
        pq_table_path = string(user_config["index_root_path"])+string(dataset_config["pq_table_path"]);
        pq_centorid_path = string(user_config["index_root_path"])+string(dataset_config["pq_centorid_path"]);

        pq_vector.init_with_file(pq_vector_path, 1, 0); //PQ向量不一定全量加载，有的调用者会自己传入PQ向量
        pq_table.init_with_file(pq_table_path, 1, 1);   //向量表全量加载
        pq_centorid.init_with_file(pq_centorid_path, 1, 1); 
        //将pq_centorid全0则is_add_pq_centorid为0，否则为1
        is_add_pq_centorid = 0;
        for(int i=0; i<dim; i++){
            if(pq_centorid.get_line(0)[i]!=0){
                is_add_pq_centorid = 1;
                break;
            }
        }
    }

    //方便调用者分配空间
    //需要调用者释放内存
    T* get_empty_vector(){
        return new T[dim];
    }

    //根据编码重构向量
    int get_vector_by_code(PQTYPE* code, T* vector_buf){
        for(int i=0; i<pq_bucket; i++){
            T* bucket_start_addr = pq_table.get_line(i); //当前分桶的起始地址
            int vector_offset_in_bucket = code[i] * pq_vector_dim; //当前向量在桶内的起始偏移量,单位是T的size
            //拷贝到vector中
            memcpy(vector_buf+i*pq_vector_dim, bucket_start_addr+vector_offset_in_bucket, pq_vector_dim*sizeof(T));
        }
        //累加向量中心点
        if(is_add_pq_centorid){
            T* pq_centorid_vector = pq_centorid.get_line(0);
            for(int i=0; i<dim; i++){
                vector_buf[i] += pq_centorid_vector[i];
            }
        }
        return 0;
    }

    //调用者负责初始化vector的空间
    int get_vector_by_id(int index, T* vector_buf){
        PQTYPE* code_row = pq_vector.get_line(index);
        get_vector_by_code(code_row, vector_buf);
        return 0;
    }

    PQConstructor(){
    }
    ~PQConstructor(){
    }
};

//这个图支持向量（包括原始向量和PQ向量）合并存储的结构
//初始化时指定向量的类型和大小。为0则代表只包含邻接表
//T代表的是原始向量的类型，用于确定query的类型,以及节点类型1或者4的时候的原始向量类型。PQ向量永远都是uint8_t，解码出来的向量永远都是float，因此这两个不用指定
template <class T>
class Graph{
private: 
    int n; //节点数量
    int R; //最大邻居数
    int PQ_dim; //PQ向量的维数
    int Vector_dim; //原始向量的维数
    int node_type;  //0代表只包含邻接表，
                    //1代表包含当前节点的原始向量(类似DiskANN)
                    //2代表包含PQ向量
                    //3代表类似与AiSAQ的结构
                    //4在3的基础上在末尾增加了原始向量以提高精度

    int neibor_table_size; //每个点的邻接表占用大小(B)，=R*sizeof(int)
    int vector_size; //每个点的向量的占用大小(B)，=dim*sizeof(T)。这里的向量不仅是类似于DiskANN那样在邻接表后面存储原始向量，而是也包含类似AiSAQ的结构中包含的所有邻居向量
    int raw_vector_size; //原始向量的大小，仅在node_type=4时有效
    int node_size; //每个点所占的大小(B)，为上面两项相加
    int alignment_size; //文件对齐的粒度(B)，一般为4K
    int node_per_page; //每个页包含多少个节点，=alignment_size/buf_entry_size
    int vectors_raw_type; //当node_type=4时，需要判断存储原始向量的类型
                          //0表示无效，1表示float32，2表示uint8_t

    //图加载模式相关
    int graph_load_mode;    //0表示图文件按需读取，1表示全量加载
    char* graph_buf;        //如果graph_load_mode=1，则用来存储完整的图文件
    int graph_buf_length;   //buf的长度、也就是文件的长度

    //多线程相关
    int thread_count; //支持的向量并发数
    std::vector<int> graph_fd_array;   //创建文件句柄数组，为每个线程分配一个文件句柄
    std::vector<char*> buf_array;  //为每个线程分配一个读buffer,大小与node_size相同
    std::vector<int> buf_array_info; //buf_array里面已经缓存的数据，-1表示不合法

    //BeamSearch相关
    std::vector<IOUringManager_ASYNC> io_manager_array; //每个线程分配一个IOUringManager
    std::vector<std::vector<void*>> io_manager_buf_array_2d; //用于beamsearch的buffer，二维数组.行数等于线程数，列数等于MAX_BEAMSEARCH_WIDTH

    int bloomfilter_size; //布隆过滤器的大小
    int bloomfilter_hash_count; //布隆过滤器的哈希函数数量

    PQConstructor<PQ_DECODE_TYPE> pq_constructor; //节点向量为PQ向量时需要。不需要像Bin那样多线程化，因为工作时只需要访问PQ表但是PQ表肯定是要常驻内存的，而且返回结果时是调用者传入buf，不会引发多线程冲突

    string graph_filepath; //图文件的路径
    string vector_filepath; //向量文件的路径,仅在部分图类型中有用,例如类型0的时候加载
    Bin<T> vectors_raw; //节点类型为0时存储原始向量用
    Bin<PQTYPE> vectors_pq;

public:
    //Initialize the graph from its on-disk file plus (for some node types) a separate vector file.
    //The vector file path is passed in separately for flexibility.
    //graph_load_mode: 0 = nodes are read from disk on demand, 1 = the whole graph file is loaded into memory.
    //Note: under full load with node_type=1/2 the vector file is NOT loaded (vectors are embedded in the graph file).
    //Returns 0 on success, -1 when the graph file cannot be stat'ed; other failures terminate the process.
    int init(json user_config, json dataset_config, string graph_path, string vector_path, int graph_load_mode=0, int vector_load_mode=0){ 
        //Parameters read from the config files
        thread_count = user_config["thread_count"];        
        bloomfilter_size = user_config["bloomfilter_size"];
        bloomfilter_hash_count = user_config["bloomfilter_hash_count"];
        alignment_size = dataset_config["alignment_size"];
        Vector_dim = dataset_config["dim"];
        PQ_dim = dataset_config["pq_bucket"];

        //The IO engine is compiled against a fixed block size, so the configured
        //alignment must match it exactly.
        if(alignment_size != ANN_BLOCK_SIZE){
            cout << "[Error!!!]alignment_size in config file: " << alignment_size << " not same with ANN_BLOCK_SIZE:" << ANN_BLOCK_SIZE << endl;
            cout << "Please change ANN_BLOCK_SIZE in io_engine.h and re-compile" << endl;
            exit(0);
        }

        //Arguments supplied by the caller
        graph_filepath = graph_path;
        vector_filepath = vector_path;
        this->graph_load_mode = graph_load_mode;

        //Read the graph file header: 7 ints followed by an 8-byte type tag.
        ifstream fp(graph_filepath, ios::binary);
        if(!fp.is_open()){
            cout << "[Error!!!]Graph file " << graph_filepath << " open failed" << endl;
            exit(0);
        }
        fp.read((char*)&n, sizeof(int));
        fp.read((char*)&R, sizeof(int));
        fp.read((char*)&node_type, sizeof(int));
        fp.read((char*)&node_per_page, sizeof(int));
        fp.read((char*)&neibor_table_size, sizeof(int));
        fp.read((char*)&vector_size, sizeof(int));
        fp.read((char*)&raw_vector_size, sizeof(int));
        //Read 8 bytes into the raw-vector type tag.
        char raw_type_buf[8]; // 8-byte buffer for the type string
        fp.read(raw_type_buf, 8); // NOTE(review): none of the read() calls are checked; a truncated header would go unnoticed
        fp.close();

        //If a raw-vector section exists, determine its element type from the tag.
        //memcmp covers all 8 bytes, so the tag must be NUL-padded ("float32\0" / "uint8_t\0").
        if(raw_vector_size > 0){
            if(memcmp(raw_type_buf, "float32", 8) == 0){
                vectors_raw_type = 1;
            }
            else if(memcmp(raw_type_buf, "uint8_t", 8) == 0){
                vectors_raw_type = 2;
            }
            else{
                cout << "[Error!!!]Graph file " << graph_filepath << " raw_type_buf is not float32 or uint8_t" << endl;
                exit(0);
            }
        }
        else
            vectors_raw_type = 0;

        node_size = neibor_table_size + vector_size + raw_vector_size;
        cout << "Graph init with n: " << n << " R: " << R << " node_type: " << node_type << " node_per_page: " << node_per_page << " neibor_table_size: " << neibor_table_size << " vector_size: " << vector_size;
        cout << " raw_vector_size: " << raw_vector_size << " node_size:" << node_size << " vectors_raw_type:" << vectors_raw_type << endl;

        //Full-load mode: stat the graph file for its size and read it entirely into memory.
        if(graph_load_mode == 1){
            struct stat file_stat;
            if (stat(graph_filepath.c_str(), &file_stat) != 0) {
                std::cerr << "Failed to get file size." << std::endl;
                return -1;
            }
            graph_buf_length = file_stat.st_size; // NOTE(review): graph_buf_length is declared int — a graph file >2GB would truncate here; verify expected sizes
            graph_buf = new char[graph_buf_length];
            ifstream fp(graph_filepath, ios::binary);
            if(!fp.is_open()){
                std::cout << "无法打开文件" << graph_filepath << std::endl;
                exit(1);
            }
            fp.read(graph_buf, graph_buf_length);
            fp.close();
            cout << "Graph file all loaded, size: " << graph_buf_length << endl;
        }
        else if(graph_load_mode == 0){  //on-demand mode: set up per-thread fds and aligned read caches
            for(int i=0; i<thread_count; i++){
                int fd = open(graph_filepath.c_str(), O_RDONLY | O_DIRECT);
                if (fd == -1) {
                    std::cerr << "Failed to open file: " << graph_filepath << std::endl;
                    exit(1);
                }
                graph_fd_array.push_back(fd);
                char* buf = (char*)get_aligned_buffer(node_size); //aligned allocation, required by O_DIRECT
                buf_array.push_back(buf);
                buf_array_info.push_back(-1); //-1 means this buffer caches no node yet
            }
            //The block below may be commented out when async IO is not needed.
            io_manager_array.reserve(thread_count);
            for (int t = 0; t < thread_count; ++t) {
                io_manager_array.emplace_back(graph_fd_array[t], MAX_BEAMSEARCH_WIDTH);
                std::vector<void*> io_manager_buf_array_1d;
                for(int j=0; j < MAX_BEAMSEARCH_WIDTH; j++){ //give each thread MAX_BEAMSEARCH_WIDTH buffers
                    io_manager_buf_array_1d.push_back(get_aligned_buffer(node_size));
                }
                io_manager_buf_array_2d.push_back(io_manager_buf_array_1d);
            }
        }

        //Initialize the vector storage / PQ reconstructor according to the node type.
        if(node_type == 0){
            vectors_raw.init_with_file(vector_filepath, thread_count, vector_load_mode); //raw vectors, load policy from vector_load_mode
            if(vectors_raw.get_n() != n){
                cout << "[Error!!!]Graph init with vectors_raw file size not match" << endl;
                cout << "vectors_raw file size: " << vectors_raw.get_n() << " graph file size: " << n << endl;
                exit(0);
            }
        }
        else if(node_type == 1){ //DiskANN layout
            if(graph_load_mode == 1){ //graph fully cached: vectors are embedded in it, nothing to load here
                
            }
            else{
                vectors_pq.init_with_file(vector_filepath, thread_count, vector_load_mode); //PQ vectors
                if(vectors_pq.get_n() != n){
                    cout << "[Error!!!]Graph init with vectors_pq file size not match" << endl;
                    cout << "vectors_pq file size: " << vectors_pq.get_n() << " graph file size: " << n << endl;
                    exit(0);
                }
            }
            pq_constructor.init(user_config, dataset_config); 
        }
        else if(node_type == 2)
        {
            if(graph_load_mode == 1){ //graph fully cached: vectors are embedded in it, nothing to load here
                
            }
            else{
                //type 2 is currently only used for L1 and is always fully loaded;
                //reaching this path indicates a bug, so exit to ease debugging
                cout << "Can't init graph with type 2 without load all mode" << endl;
                exit(0);
                // vectors_pq.init_with_file(vector_filepath, thread_count, 1); //would fully load the PQ vectors
                // if(vectors_pq.get_n() != n){
                //     cout << "[Error!!!]Graph init with vectors_pq file size not match" << endl;
                //     cout << "vectors_pq file size: " << vectors_pq.get_n() << " graph file size: " << n << endl;
                //     exit(0);
                // }
            }
            pq_constructor.init(user_config, dataset_config); 
        }
        else if(node_type == 3 || node_type == 4)
        {
            pq_constructor.init(user_config, dataset_config); //vectors live inside the graph; only the PQ reconstructor is needed
        }
        else
        {
            cout << "[Error!!!]Graph::init node_type error" << endl;
            exit(0);
        }

        cout << "Graph init with R: " << R << " PQ dim: " << PQ_dim << " node_type: " << node_type << endl;
        cout << "Graph init with neibor_table_size: " << neibor_table_size << " vector_size: " << vector_size << " buf_entry_size: " << node_size << endl;
        
        return 0;
    }

    int get_R(){
        return R;
    }

    //Compute a node's byte offset inside the graph file.
    //Page 0 holds the file header, so data pages start at index 1.
    long long get_offset_in_file(int id){
        const long long data_page = 1LL + id / node_per_page;
        const int slot_in_page = id % node_per_page;
        return data_page * alignment_size + slot_in_page * node_size;
    }

    //Load one graph node into this thread's preallocated aligned buffer.
    //On return (on-demand mode) data_addr points at the node's first byte inside
    //that buffer; in full-load mode data_addr is left untouched because callers
    //read graph_buf directly via the other accessors.
    //Returns the number of IO operations performed, for perf accounting.
    int load_node_to_buf(int id, char*& data_addr, int thread_id=0){
        if(id >= n){
            cout << "[Error!!!]Graph::load_node id out of range" << endl;
            cout << "id: " << id << " n: " << n << endl;
            exit(0);
        }
        if(graph_load_mode == 1){ //full-load mode: the data already resides in graph_buf
            return 1; 
        }
        else if(graph_load_mode == 0){ //on-demand read
            if(buf_array_info[thread_id] != id){ //cache miss: read the node's aligned page(s) from disk
                char* buf = buf_array[thread_id]; //preallocated, aligned for O_DIRECT
                int fd = graph_fd_array[thread_id];
                long long offset = get_offset_in_file(id); //fix: was truncated to int, corrupting reads on graph files >2GB
                long long aligned_offset = aligned_offset_in_file(offset);
                if (lseek(fd, aligned_offset, SEEK_SET) == -1) {
                    cout << "Error, load_node_to_buf lseek failed" << endl;
                    exit(0);
                }
                ssize_t bytes_read = read(fd, buf, aligned_size(node_size));
                //Only a hard failure is fatal; a short read near EOF can be legitimate
                //because the aligned length may extend past the last node.
                if (bytes_read == -1) {
                    cout << "Error, load_node_to_buf read failed" << endl;
                    exit(0);
                }
                data_addr = buf + aligned_offset_in_buf(offset); //skip the alignment padding inside the buffer

                buf_array_info[thread_id] = id; //remember which node the buffer now holds
                return 1; 
            }
            else{ //cache hit: the node is already in this thread's buffer
                data_addr = buf_array[thread_id] + aligned_offset_in_buf(get_offset_in_file(id));
                return 0;
            }
        }
        return 0;
    }

    //用于在全量加载模式时，从图文件中获取任意节点的向量。注意这里返回char*，如果是PQ向量则需要解码，解码工作由调用者负责
    //不支持在非全量加载模式使用该接口，否则直接报错
    //返回的地址不能释放
    char* get_vector_from_graph(int id, int thread_id=0){
        if(id >= n){
            cout << "[Error!!!]Graph::get_vector_from_graph id out of range" << endl;
            cout << "id: " << id << " n: " << n << endl;
            exit(0);
        }
        if(node_type != 1 && node_type != 2 && node_type != 4){ //只有DiskANN和类型2的节点支持获取向量.(更新: node_type4也支持)
            cout << "[Error!!!]Graph::get_vector_from_graph node_type=" << node_type << " error" << endl;
            exit(0);
        }
        if(graph_load_mode == 1){ //全量加载模式下，直接返回邻接表的起始地址
            if(node_type == 1 || node_type == 2)
                return (char*)(graph_buf + get_offset_in_file(id) + neibor_table_size); 
            else if(node_type == 4)
                return (char*)(graph_buf + get_offset_in_file(id) + neibor_table_size + vector_size); //类型4的原始向量存储在末尾
        }
        else{ //正常情况下,按需加载模式下不会使用该函数,如果出现说明程序写错了
            cout << "[Error!!!]Graph::get_vector_from_graph graph_load_mode error" << endl;
            exit(0);
        }
        cout << "[Error!!!]Graph::get_vector_from_graph graph_load_mode error" << endl;
        exit(0);
    }

    //Return a node's adjacency list. The id is needed only in full-load mode to
    //address graph_buf; in on-demand mode the caller's data_addr is used.
    //The returned pointer aliases existing storage and must not be freed.
    int* get_neibor_from_buf(int id, char* data_addr, int thread_id=0){
        switch(graph_load_mode){
        case 1: //full-load: the adjacency list starts at the node's offset in graph_buf
            return (int*)(graph_buf + get_offset_in_file(id));
        case 0: //on-demand: the node buffer starts with the adjacency list
            return (int*)data_addr;
        }
        cout << "[Error!!!]Graph::get_neibor_from_buf graph_load_mode error" << endl;
        exit(0);
    }

    //Used for the DiskANN-style second-pass (reranking) evaluation only: returns
    //the node's raw vector, either from the in-memory graph (full-load) or from
    //the thread's read buffer (on-demand).
    //Supported only for node_type 1 and 4; the returned pointer must not be freed.
    T* get_vector_from_buf(int id, char* data_addr, int thread_id=0){
        if(node_type == 1){ //DiskANN layout: raw vector right after the adjacency list
            if(graph_load_mode == 1){
                return (T*)(graph_buf + get_offset_in_file(id) + neibor_table_size); 
            }
            else if(graph_load_mode == 0){
                return (T*)(data_addr + neibor_table_size);
            }
        }
        if(node_type == 4){ //AiSAQ + raw vector layout: raw vector stored at the node's end
            if(graph_load_mode == 1){
                return (T*)(graph_buf + get_offset_in_file(id) + neibor_table_size + vector_size); 
            }
            else if(graph_load_mode == 0){
                return (T*)(data_addr + neibor_table_size + vector_size);
            }
        }
        //fix: the message used to blame get_neibor_from_buf (copy-paste); it can also
        //be reached via an unsupported node_type, not just a bad load mode
        cout << "[Error!!!]Graph::get_vector_from_buf node_type or graph_load_mode error" << endl;
        exit(0);
    }

    //Fetch the decoded vector of the neibor_index-th neighbor of node `id`.
    //node_type 0/1: the neighbor's vector is read separately (raw-vector file / PQ array).
    //node_type 2: the neighbor's PQ code is read from the in-memory graph and decoded.
    //node_type 3/4: the neighbor's PQ code is stored inside the node buffer itself.
    //Returns freshly allocated memory — the caller must delete[] it.
    //Always returns PQ_DECODE_TYPE (float) because of the PQ decoder; for node_type 0
    //the raw values are cast to float for uniformity.
    PQ_DECODE_TYPE* get_vector_of_neibor_of_buf(int id, char* data_addr, int neibor_index, int thread_id=0){
        PQ_DECODE_TYPE* vector_buf_ret = new PQ_DECODE_TYPE[Vector_dim];
        if(node_type == 0){ //vector comes from the separate raw-vector file
            int* neibor_list = get_neibor_from_buf(id, data_addr, thread_id);
            int neibor_id = neibor_list[neibor_index];
            T* vector_buf = vectors_raw.get_line(neibor_id, thread_id); //on-demand load, so the thread id is required
            //cast element-wise to PQ_DECODE_TYPE
            for(int i=0; i<Vector_dim; i++)
                vector_buf_ret[i] = static_cast<PQ_DECODE_TYPE>(vector_buf[i]);
        }
        else if(node_type == 1){ //DiskANN layout
            int* neibor_list = get_neibor_from_buf(id, data_addr, thread_id);
            int neibor_id = neibor_list[neibor_index]; 
            if(graph_load_mode == 1){ //full-load: the raw vector is embedded in graph_buf
                T* vector_buf = (T*)get_vector_from_graph(neibor_id, thread_id);
                for(int i=0; i<Vector_dim; i++)
                    vector_buf_ret[i] = static_cast<PQ_DECODE_TYPE>(vector_buf[i]);
            }
            else{ //on-demand: read the PQ code array and decode
                PQTYPE* code_row = vectors_pq.get_line(neibor_id, thread_id);
                pq_constructor.get_vector_by_code(code_row, vector_buf_ret);
            }
        }
        else if(node_type == 2){ //read the PQ code then decode
            int* neibor_list = get_neibor_from_buf(id, data_addr, thread_id);
            int neibor_id = neibor_list[neibor_index];
            if(graph_load_mode == 1){ //full-load: decode the PQ code straight from graph_buf
                PQTYPE* code_row = (PQTYPE*)get_vector_from_graph(neibor_id, thread_id);
                pq_constructor.get_vector_by_code(code_row, vector_buf_ret);
            }
            else{ //unexpected for type 2: logged but deliberately non-fatal, falls back to the PQ array
                cout << "Error! Graph::get_vector_of_neibor_of_buf graph_load_mode error" << endl;
                // exit(0);
                PQTYPE* code_row = vectors_pq.get_line(neibor_id, thread_id);
                pq_constructor.get_vector_by_code(code_row, vector_buf_ret);
            }
        }
        else if(node_type == 3 || node_type == 4){ //PQ codes of all neighbors live inside the node
            PQTYPE* code_row = NULL;
            if(graph_load_mode == 1){ //full-load: address the code inside graph_buf
                code_row = (PQTYPE*)(graph_buf + get_offset_in_file(id) + neibor_table_size + neibor_index * PQ_dim * sizeof(PQTYPE));
            }
            else if(graph_load_mode == 0){ //on-demand: address the code inside the thread buffer
                code_row = (PQTYPE*)(data_addr + neibor_table_size + neibor_index * PQ_dim * sizeof(PQTYPE));
            }
            pq_constructor.get_vector_by_code(code_row, vector_buf_ret);
        }
        else{
            cout << "[Error!!!]Graph::get_vector_of_neibor_of_buf node_type error" << endl;
            exit(0);
        }

        return vector_buf_ret;
    }

    //Re-evaluate one result entry's distance against the node's raw vector to
    //improve precision, addressing the entry by its index in the MinList.
    //Only meaningful for node_type 1 (DiskANN) and 4 (AiSAQ + raw vector);
    //other node types are a no-op.
    void reranking_updatebyindex(T* query_vector, MinList& result_list, int index, int node_id, char* data_addr, int thread_id){
        if(node_type == 1){ 
            T* vector_raw = get_vector_from_buf(node_id, data_addr, thread_id);
            float d = DISTANCE(query_vector, vector_raw, Vector_dim);
            result_list.update_entry_by_index(index, d);
        }
        else if(node_type == 4){
            T* vector_raw = get_vector_from_buf(node_id, data_addr, thread_id);
            float d = 0;
            if(vectors_raw_type == 1){ //raw vectors stored as float32
                float* vector_raw_float = (float*)vector_raw;
                d = DISTANCE(query_vector, vector_raw_float, Vector_dim);
            }
            else if(vectors_raw_type == 2){ //raw vectors stored as uint8_t
                uint8_t* vector_raw_uint8 = (uint8_t*)vector_raw;
                d = DISTANCE(query_vector, vector_raw_uint8, Vector_dim);
            }
            else{
                //fix: the message used to blame Graph::search_topk (copy-paste)
                cout << "[Error!!!]Graph::reranking_updatebyindex vectors_raw_type error" << endl;
                exit(0);
            }
            result_list.update_entry_by_index(index, d);
        }
    }

    //Same reranking as reranking_updatebyindex, but the entry is located by node
    //id, so the update may fail silently if the id is no longer in the MinList.
    //Only meaningful for node_type 1 (DiskANN) and 4 (AiSAQ + raw vector).
    void reranking_updatebyid(T* query_vector, MinList& result_list, int node_id, char* data_addr, int thread_id){
        if(node_type == 1){ 
            T* vector_raw = get_vector_from_buf(node_id, data_addr, thread_id);
            float d = DISTANCE(query_vector, vector_raw, Vector_dim);
            result_list.update_entry_by_id(node_id, d);
        }
        else if(node_type == 4){
            T* vector_raw = get_vector_from_buf(node_id, data_addr, thread_id);
            float d = 0;
            if(vectors_raw_type == 1){ //raw vectors stored as float32
                float* vector_raw_float = (float*)vector_raw;
                d = DISTANCE(query_vector, vector_raw_float, Vector_dim);
            }
            else if(vectors_raw_type == 2){ //raw vectors stored as uint8_t
                uint8_t* vector_raw_uint8 = (uint8_t*)vector_raw;
                d = DISTANCE(query_vector, vector_raw_uint8, Vector_dim);
            }
            else{
                //fix: the message used to blame Graph::search_topk (copy-paste)
                cout << "[Error!!!]Graph::reranking_updatebyid vectors_raw_type error" << endl;
                exit(0);
            }
            result_list.update_entry_by_id(node_id, d);
        }
    }

    //Core search: best-first graph traversal (no beam search) that fills
    //result_list with the K nearest candidates, K being the MinList's size.
    //io_limit_ratio bounds the number of traversal rounds (ratio * list size);
    //update_ratio bounds how deep into the list unvisited candidates are expanded;
    //pre_filter optionally seeds the visited set so pre-excluded ids are skipped.
    int search_topk(T* query_vector, MinList& result_list, Performance_Analyser& perf, int q_id, int layer, int thread_id=0, float io_limit_ratio=10, float update_ratio=1, BloomFilter* pre_filter=nullptr){
        int io_limit = io_limit_ratio * result_list.get_size();
        int update_limit = update_ratio * result_list.get_size();
        update_limit = max(update_limit, 0);
        update_limit = min(update_limit, result_list.get_size());
        
        MinListEntry temp;
        //Seed with the entry point (node 0) when the list is empty; otherwise resume.
        if(result_list.get_count() == 0){
            temp.id = 0;
            temp.distance = 999999.0; //placeholder; if node 0 really is top-k the traversal revisits it
            temp.flag = 0;
            result_list.insert(&temp);
        }
        //Visited set for this traversal (Bloom filter, optionally pre-seeded).
        BloomFilter visited_list(bloomfilter_size, bloomfilter_hash_count);
        if(pre_filter != nullptr){
            visited_list.copy_from(pre_filter);
        }

        int round_count = 0; //fix: was `io_count`, shadowed by the per-node IO count below
        int repeat_neibor_count = 0;
        while(1){
            //Scan the MinList head-first for an unvisited entry; quit when all are visited.
            int visited_count = 0;
            for(int i=0; i<update_limit; i++){ 
                if(visited_list.check(result_list.get_id(i))){
                    visited_count++;
                    if(visited_count >= update_limit) //every reachable candidate has been expanded
                        return 0;
                    continue;
                }

                int node_id = result_list.get_id(i);
                visited_list.add(node_id);
                //Load the node; its page may or may not embed the neighbor vectors.
                char* data_addr;
                auto load_st = perf.get_time();
                int load_io_count = load_node_to_buf(node_id, data_addr, thread_id); //fix: renamed so it no longer shadows the round counter
                auto load_et = perf.get_time();
                perf.add_time(q_id, 1+(layer*3+2), perf.elapsed_time(load_st, load_et));
                perf.add_io_count(q_id, layer, load_io_count);

                reranking_updatebyindex(query_vector, result_list, i, node_id, data_addr, thread_id);

                //Walk the adjacency list and insert every unvisited neighbor.
                int* neibor_list = get_neibor_from_buf(node_id, data_addr, thread_id);
                for(int neibor_index=0; neibor_index<R; neibor_index++){
                    int neibor_id = neibor_list[neibor_index];
                    if(neibor_id == -1 || neibor_id >= n){ //fix: valid ids are 0..n-1, so n itself is invalid too (was `> n`)
                        break;
                    }
                    if(visited_list.check(neibor_id)){
                        repeat_neibor_count++;
                        continue;
                    }
                    PQ_DECODE_TYPE* vector_buf = get_vector_of_neibor_of_buf(node_id, data_addr, neibor_index, thread_id);
                    float distance = DISTANCE(query_vector, vector_buf, Vector_dim);
                    delete[] vector_buf;
                    temp.id = neibor_id;
                    temp.distance = distance;
                    temp.flag = 0; //a fresh node has not been visited yet
                    #ifdef FAT_MINLIST_ENTRY
                    temp.parent_node_id = node_id;
                    temp.root_node_id = result_list.get_root_id(i);
                    temp.hop = result_list.get_hop(i) + 1;
                    #endif
                    result_list.insert(&temp);
                    perf.add_visited_count(q_id, layer, 1);
                }
                auto compute_et = perf.get_time();
                perf.add_time(q_id, 1+(layer*3+1), perf.elapsed_time(load_et, compute_et));
                perf.set_trivial_value(q_id, 0, repeat_neibor_count);

                //Restart the scan so the freshly updated MinList is considered.
                break;
            }

            //Early termination after io_limit rounds.
            round_count++;
            if(round_count >= io_limit){
                break;
            }
        }

        return 0;
    }

    //Beam-search variant of search_topk (currently only the AiSAQ IO pattern is
    //exercised). Each round submits up to beamwidth_cur async reads via io_uring
    //and processes completions as they arrive; the beam width grows geometrically
    //up to MAX_BEAMSEARCH_WIDTH.
    int search_topk_beam(T* query_vector, MinList& result_list, Performance_Analyser& perf, int q_id, int layer, int beamwidth_start=2, float beamwidth_increase_factor=1.5, int thread_id=0, float io_limit_ratio=10, float update_ratio=1, BloomFilter* pre_filter=nullptr){
        if(graph_load_mode == 1 || beamwidth_start == 0){ //full-load mode (or width 0) needs no beam search
            //fix: pre_filter is now forwarded; it used to be silently dropped here
            return search_topk(query_vector, result_list, perf, q_id, layer, thread_id, io_limit_ratio, update_ratio, pre_filter);
        }
        
        int io_limit = io_limit_ratio * result_list.get_size();
        int update_limit = update_ratio * result_list.get_size();
        update_limit = max(update_limit, 0);
        update_limit = min(update_limit, result_list.get_size());
        
        MinListEntry temp;
        //Seed with the entry point (node 0) when the list is empty; otherwise resume.
        if(result_list.get_count() == 0){
            cout << "fill node with 0" << endl;
            temp.id = 0;
            temp.distance = 999999.0; //placeholder; if node 0 really is top-k the traversal revisits it
            temp.flag = 0;
            result_list.insert(&temp);
        }
        //Visited set for this traversal (Bloom filter, optionally pre-seeded).
        BloomFilter visited_list(bloomfilter_size, bloomfilter_hash_count);
        if(pre_filter != nullptr){
            visited_list.copy_from(pre_filter);
        }

        int io_count = 0; //completed IOs so far, used only for early termination
        int beamwidth_cur = beamwidth_start;
        while(1){ 
            //1. Collect unvisited candidates from the head of the MinList until the
            //current beam width is reached; quit when everything is already visited.
            int visited_count = 0;
            std::vector<int> access_node_id_list; //node ids to read this round
            for(int i=0; i<update_limit; i++){ 
                int node_id = result_list.get_id(i);
                if(node_id == -1){
                    break;
                }
                if(visited_list.check(node_id)){
                    visited_count++;
                    if(visited_count >= update_limit){ //every reachable candidate has been expanded
                        return 0;
                    }
                    continue;
                }
                access_node_id_list.push_back(node_id);
                if((int)(access_node_id_list.size()) >= beamwidth_cur)
                    break;
            } 
            //The scan may stop before beamwidth_cur is reached (everything else was
            //visited), so the batch size is any value in [1, beamwidth_cur]; 0 is a bug.
            int current_true_beamwidth = access_node_id_list.size();
            if(current_true_beamwidth == 0 || current_true_beamwidth > beamwidth_cur || current_true_beamwidth > MAX_BEAMSEARCH_WIDTH){
                cout << "[Error]Graph::search_topk_beam error" << endl;
                cout << "|-access_node_id_list.size() = " << access_node_id_list.size() << endl;
                cout << "|-beamwidth_cur = " << beamwidth_cur << endl;
                exit(0);
            }

            //2. Submit one async read per candidate node.
            IOUringManager_ASYNC& manager = io_manager_array[thread_id];
            std::vector<void*>& buf_list = io_manager_buf_array_2d[thread_id]; //fix: take by reference instead of copying the buffer list every round
            vector<long long> aligned_offset_list;
            vector<long long> aligned_length_list;
            vector<int> ids;
            for(int i = 0; i < current_true_beamwidth; i++){
                aligned_offset_list.push_back(aligned_offset_in_file(get_offset_in_file(access_node_id_list[i])));
                aligned_length_list.push_back(aligned_size(node_size));
                ids.push_back(i);
            }
            if (!manager.submitRequests(buf_list, aligned_offset_list, aligned_length_list, ids, current_true_beamwidth)) {
                std::cout << "Graph::search_topk_beam Failed to submit IO requests." << std::endl;
                exit(0);
            }
            perf.add_io_count(q_id, layer, current_true_beamwidth);

            //3. Poll completions until the whole batch is done; process each node as it lands.
            auto load_st = perf.get_time();
            int complete_request = 0;
            while (complete_request < current_true_beamwidth) {
                std::vector<int> completedIndices; //indices of requests that finished
                int complete_index_count = manager.getCompletedResultIds(completedIndices);
                complete_request += complete_index_count;
                for(int i=0; i<complete_index_count; i++){
                    int complete_index = completedIndices[i]; //which submitted IO finished
                    int complete_id = access_node_id_list[complete_index]; //node id of that request
                    visited_list.add(complete_id);
                    char* data_addr = (char*)(buf_list[complete_index]);
                    data_addr += aligned_offset_in_buf(get_offset_in_file(complete_id)); //skip alignment padding inside the buffer

                    reranking_updatebyid(query_vector, result_list, complete_id, data_addr, thread_id);

                    //Walk the adjacency list and insert every unvisited neighbor.
                    int* neibor_list = get_neibor_from_buf(complete_id, data_addr, thread_id);
                    for(int neibor_index=0; neibor_index<R; neibor_index++){
                        int neibor_id = neibor_list[neibor_index];
                        if(neibor_id == -1 || neibor_id >= n){ //fix: valid ids are 0..n-1, so n itself is invalid too (was `> n`)
                            break;
                        }
                        if(visited_list.check(neibor_id)){
                            continue;
                        }
                        PQ_DECODE_TYPE* vector_buf = get_vector_of_neibor_of_buf(complete_id, data_addr, neibor_index, thread_id);
                        float distance = DISTANCE(query_vector, vector_buf, Vector_dim);
                        delete[] vector_buf;
                        temp.id = neibor_id;
                        temp.distance = distance;
                        temp.flag = 0; //a fresh node has not been visited yet
                        result_list.insert(&temp);
                        perf.add_visited_count(q_id, layer, 1);
                    }
                }
            }
            auto load_et = perf.get_time();
            perf.add_time(q_id, 1+(layer*3+2), perf.elapsed_time(load_st, load_et));

            //Early termination once enough IOs have completed.
            io_count += complete_request;
            if(io_count >= io_limit){
                break;
            }

            //Grow the beam width for the next round.
            beamwidth_cur = (int)(beamwidth_cur * beamwidth_increase_factor);
            beamwidth_cur = std::min(beamwidth_cur, MAX_BEAMSEARCH_WIDTH);
        }

        return 0;
    }

    //Simplified pipelined ("pipe") search: keeps up to beamwidth_cur IO requests in flight and
    //processes completions as they arrive, instead of submitting and draining a whole batch.
    //query_vector: the query vector; result_list: candidate list (also carries the entry point and
    //any prior results). perf/q_id/layer: performance accounting. beamwidth_start==0 or a fully
    //loaded graph falls back to plain search_topk. io_limit_ratio caps total IOs relative to the
    //result list size; update_ratio bounds the candidate scan window; pre_filter optionally seeds
    //the visited set. Returns 0.
    int search_topk_pipe(T* query_vector, MinList& result_list, Performance_Analyser& perf, int q_id, int layer, int beamwidth_start=2, float beamwidth_increase_factor=1.5, int thread_id=0, float io_limit_ratio=10, float update_ratio=1, BloomFilter* pre_filter=nullptr){
        if(graph_load_mode == 1 || beamwidth_start == 0){ //fully loaded graph (or beamwidth_start==0) does not need beam/pipe search
            return search_topk(query_vector, result_list, perf, q_id, layer, thread_id, io_limit_ratio, update_ratio);
        }

        int io_limit = io_limit_ratio * result_list.get_size();
        int update_limit = update_ratio * result_list.get_size();
        update_limit = max(update_limit, 0);
        update_limit = min(update_limit, result_list.get_size());

        MinListEntry temp;
        //if the list is empty, seed it with the entry point (default node 0); otherwise continue from its contents
        if(result_list.get_count() == 0){
            temp.id = 0;
            temp.distance = INF; //placeholder large distance; the chance of 0 being a real top-k is low, and if it is, it will be revisited later
            temp.flag = 0;
            result_list.insert(&temp);
        }
        //start traversing the graph
        vector<int> onflight_id_list(MAX_BEAMSEARCH_WIDTH, -1); //node id per in-flight slot, -1 = slot free; the slot index also selects the IO buffer
        int onflight_id_list_valid_count = 0; //number of IOs currently in flight

        IOUringManager_ASYNC& manager = io_manager_array[thread_id];
        std::vector<void*>& buf_list = io_manager_buf_array_2d[thread_id]; //this thread's buffer pool (reference: avoid copying the vector)

        BloomFilter visited_list(bloomfilter_size, bloomfilter_hash_count);
        if(pre_filter != nullptr){
            visited_list.copy_from(pre_filter); //seed the visited set with the caller-provided pre-filter
        }

        int io_count = 0; //total IOs issued; only used for early termination
        int beamwidth_cur = min(beamwidth_start,MAX_BEAMSEARCH_WIDTH);
        while(1){
            //1. if there are pending requests, poll for completions first and process any finished IOs
            std::vector<int> completedIndex; //slot indices (into onflight_id_list) of completed IOs
            int min_nr = -1;
            if(onflight_id_list_valid_count > 0)
                min_nr = 0; //non-blocking poll
            if(onflight_id_list_valid_count >= beamwidth_cur) //all slots busy: block until at least one IO completes
                min_nr = 1;
            if(min_nr >= 0){
                int complete_index_count = manager.getCompletedResultIds(completedIndex, min_nr);
                //process every completed IO
                for(int i=0; i<complete_index_count; i++){
                    int complete_index = completedIndex[i];
                    int complete_id = onflight_id_list[complete_index];
                    char* data_addr = (char*)(buf_list[complete_index]);
                    data_addr += aligned_offset_in_buf(get_offset_in_file(complete_id));
                    //rerank using the exact data just read
                    reranking_updatebyid(query_vector, result_list, complete_id, data_addr, thread_id);
                    int* neibor_list = get_neibor_from_buf(complete_id, data_addr, thread_id);
                    //score each neighbor and insert it into the result list
                    for(int neibor_index=0; neibor_index<R; neibor_index++){
                        int neibor_id = neibor_list[neibor_index];
                        if(neibor_id == -1 || neibor_id > n){ //an invalid neighbor marks the end of the adjacency list
                            //NOTE(review): if valid ids are [0, n), this bound should be >= n — confirm; kept as-is to match search_topk_beam
                            break;
                        }
                        if(visited_list.check(neibor_id)){
                            continue; //skip nodes already visited
                        }
                        PQ_DECODE_TYPE* vector_buf = get_vector_of_neibor_of_buf(complete_id, data_addr, neibor_index, thread_id); //decode the neighbor's vector
                        float distance = DISTANCE(query_vector, vector_buf, Vector_dim); //compute the distance
                        delete[] vector_buf; //get_vector_of_neibor_of_buf allocates; release it
                        temp.id = neibor_id;
                        temp.distance = distance;
                        temp.flag = 0; //new node, not visited yet
                        result_list.insert(&temp);
                        perf.add_visited_count(q_id, layer, 1); //count the visited node
                    }

                    //free the slot
                    onflight_id_list[complete_index] = -1;
                }
                onflight_id_list_valid_count -= complete_index_count; //may subtract 0
            }

            //early termination: if IOs are still in flight keep waiting, otherwise stop.
            //breaking with IOs outstanding would leave stale completions that corrupt the next query
            if(io_count >= io_limit){
                if(onflight_id_list_valid_count > 0)
                    continue;
                else
                    break;
            }

            //2. nothing pending (or completions were just handled): try to visit new nodes
            //first compute how many more IOs may be put in flight
            int left_slot = beamwidth_cur - onflight_id_list_valid_count;
            if(left_slot <= 0) //no free slot, keep polling
                continue;
            //reaching here means onflight_id_list has free slots
            int visited_count = 0;
            std::vector<int> access_node_id_list; //node ids selected for reading this round
            for(int i=0; i<update_limit; i++){
                int node_id = result_list.get_id(i);
                if(node_id == -1)
                    break;
                if(visited_list.check(node_id)){
                    visited_count++;
                    continue;
                }

                //found an unvisited node, queue it for reading
                access_node_id_list.push_back(node_id);
                //stop once we have as many candidates as free slots
                if((int)(access_node_id_list.size()) >= left_slot)
                    break;
            }
            if(visited_count >= update_limit){ //every candidate in the window has been visited
                if(onflight_id_list_valid_count <= 0){ //and nothing is in flight: the search is done
                    return 0;
                }
                //otherwise wait for the remaining IOs in the next round
                continue;
            }

            int access_node_id_list_length = access_node_id_list.size();
            if(access_node_id_list_length == 0 && onflight_id_list_valid_count != 0){ //nothing new to read this round, keep polling
                continue;
            }
            else if(access_node_id_list_length == 0 || access_node_id_list_length > left_slot){
                cout << "[Error]Graph::search_topk_pipe access_node_id_list.size() error, value = " << access_node_id_list_length << endl;
                exit(0);
            }

            //3. assign free slots/buffers and submit the new IO requests
            vector<void*>& buf_list_pool = io_manager_buf_array_2d[thread_id]; //this thread's buffer pool (reference: avoids a per-round vector copy)
            vector<void*> cur_buf_list; //buffers for the requests below, drawn from buf_list_pool
            vector<long long> aligned_offset_list;
            vector<long long> aligned_length_list;
            vector<int> ids;
            for(int i=0; i<access_node_id_list_length; i++){
                int node_id = access_node_id_list[i];
                int empty_slot_index = -1;
                //find a free slot
                for(int j=0; j<beamwidth_cur; j++){
                    if(onflight_id_list[j] == -1){
                        empty_slot_index = j;
                        break;
                    }
                }
                if(empty_slot_index < 0){
                    cout << "[Error]Graph::search_topk_pipe empty_slot_index error, value = " << empty_slot_index << endl;
                    exit(0);
                }
                onflight_id_list[empty_slot_index] = node_id;
                onflight_id_list_valid_count++;
                cur_buf_list.push_back(buf_list_pool[empty_slot_index]);

                aligned_offset_list.push_back(aligned_offset_in_file(get_offset_in_file(node_id)));
                aligned_length_list.push_back(aligned_size(node_size));
                ids.push_back(empty_slot_index);

                visited_list.add(node_id); //mark visited at submission time, otherwise the same node could be requested twice
                perf.add_trivial_value(q_id, 1, aligned_size(node_size)); //account the L2 IO bytes
            }
            if(cur_buf_list.size() == 0){
                cout << "[Error]Graph::search_topk_pipe cur_buf_list.size() error, value = " << cur_buf_list.size() << endl;
                exit(0);
            }
            //submit the requests
            if (!manager.submitRequests(cur_buf_list, aligned_offset_list, aligned_length_list, ids, access_node_id_list_length)) {
                std::cout << "Graph::search_topk_pipe Failed to submit IO requests." << std::endl; //fixed: message previously said search_topk_beam
                exit(0);
            }
            perf.add_io_count(q_id, layer, access_node_id_list_length); //count the IOs

            //4. grow beamwidth_cur for the next round
            beamwidth_cur = (int)(beamwidth_cur * beamwidth_increase_factor);
            beamwidth_cur = std::min(beamwidth_cur, MAX_BEAMSEARCH_WIDTH);

            //update the IO counter
            io_count += access_node_id_list_length;
        }

        return 0;
    }

    Graph(){ //default-constructs an empty object; actual setup is performed by init() (called by ANNLite below)
    }
    //Releases per-thread resources (file descriptors and IO buffers). Only load mode 0
    //(the mode ANNLite uses for on-demand graphs) owns these per-thread resources;
    //other modes allocate nothing here, so nothing is freed for them.
    ~Graph(){
        if(graph_load_mode == 0){
            for(int i=0; i<thread_count; i++){
                close(graph_fd_array[i]);
                free(buf_array[i]);
                for(size_t j=0; j<io_manager_buf_array_2d[i].size(); j++){
                    free(io_manager_buf_array_2d[i][j]); //memory was allocated with posix_memalign, so it is released with free
                }
            }
        }
    }
};

//Top-level multi-layer ANN index. Depending on index_mode it wires together an in-memory
//navigation layer (L1), a disk-resident graph layer (L2) and a bottom vector layer (L3),
//and dispatches queries to the matching search pipeline.
template <class T>
class ANNLite{
private:
    //global configuration
    int n; //number of base vectors in the dataset
    int dim; //dimensionality of each vector
    int thread_count; //number of search threads
    int index_mode; //0 = ANNLite, 1 = DiskANN, 2 = SPANN, 3 = AiSAQ
    int calculate_recall_of_layers; //1 = compute L1/L2 recall against external ground truths
    int l2_beamwidth_start; //initial beam width of the L2 search (0 = fall back to plain search_topk)
    float beamwidth_increase_factor; //per-round multiplicative growth of the beam width
    int pipesearch_on; //1 = use search_topk_pipe on L2, 0 = use search_topk_beam

    //L1: in-memory navigation layer
    Graph<T> L1_graph;
    Bin<int> L1_L2_mapping; //maps L1 node ids to L2 cluster-center ids
    Bin<int> L1_cached_list; //nodes fully cached at L1; marked visited before the L2 search starts
    //L2: disk-resident graph layer
    Bin<CTYPE> L2_cluster_center;
    Graph<T> L2_graph;
    float l2_io_limit; //IO budget ratio for the L2 search
    float l2_update_ratio; //update-window ratio for the L2 search
    //L3: bottom layer holding the full vectors
    LastLayerVectors<T> last_layer_vectors;
    int l3_graph_load_in_batch; //1 = batched async IO (io_uring or libaio), 0 = synchronous IO

    //ground truths for the L1/L2 recall evaluation
    Bin<int> l1_gts;
    Bin<int> l2_gts;

    //pre-filter used to seed the visited set of the L2 search
    int bloomfilter_size; //bloom filter size
    int bloomfilter_hash_count; //number of hash functions
    BloomFilter* pre_filter = nullptr; //owned; allocated only in ANNLite mode (fixed: was left uninitialized in the other modes)

public:
    //Builds the index from the user and dataset configuration. Each index_mode initializes
    //the relevant layers with the appropriate load mode (full load vs on-demand).
    ANNLite(json user_config, json dataset_config){
        n = dataset_config["vector_count"];
        dim = dataset_config["dim"];
        thread_count = user_config["thread_count"];
        index_mode = dataset_config["index_mode"];
        l2_io_limit = user_config["l2_io_limit"];
        l2_update_ratio = user_config["l2_update_ratio"];
        calculate_recall_of_layers = user_config["calculate_recall_of_layers"]; //whether to compute per-layer recall
        if(calculate_recall_of_layers==1){
            l1_gts.init_with_file(string(user_config["index_root_path"])+string(dataset_config["l1_gt_path"])); //may fail to open: this ground truth is not always generated
            l2_gts.init_with_file(string(user_config["index_root_path"])+string(dataset_config["l2_gt_path"]));
        }
        l2_beamwidth_start = user_config["l2_beamwidth_start"];
        beamwidth_increase_factor = user_config["beamwidth_increase_factor"];
        pipesearch_on = user_config["pipesearch_on"];
        bloomfilter_size = user_config["bloomfilter_size"];
        bloomfilter_hash_count = user_config["bloomfilter_hash_count"];

        if(index_mode == 0){ //ANNLite
            //L1 layer
            L1_graph.init(user_config, dataset_config, string(user_config["index_root_path"])+string(dataset_config["l1_graph_path"]), "", 1); //fully loaded, memory resident
            L1_L2_mapping.init_with_file(string(user_config["index_root_path"])+string(dataset_config["l1_l2_graph_node_mapping_path"]), thread_count, 1); //L1-to-L2 mapping table; small, so fully loaded
            L1_cached_list.init_with_file(string(user_config["index_root_path"])+string(dataset_config["l1_cachelist_path"]), thread_count, 1);
            pre_filter = new BloomFilter(bloomfilter_size, bloomfilter_hash_count);
            for(int i=0; i<L1_cached_list.get_n(); i++){
                pre_filter->add(L1_cached_list.get_line(i)[0]);
            }

            //L2 layer
            string l2_graph_path = string(user_config["index_root_path"])+string(dataset_config["l2_graph_root_path"]);
            L2_graph.init(user_config, dataset_config, l2_graph_path, "", 0); //loaded on demand

            //L3 layer
            l3_graph_load_in_batch = user_config["l3_graph_load_in_batch"];
            last_layer_vectors.init(user_config, dataset_config);
        }
        else if(index_mode == 1){ //DiskANN
            //L2 layer
            string l2_vector_path = string(user_config["index_root_path"])+string(dataset_config["pq_vector_path"]);
            string l2_graph_path = string(user_config["index_root_path"])+string(dataset_config["l2_graph_root_path"]);
            L2_graph.init(user_config, dataset_config, l2_graph_path, l2_vector_path, 0, 1); //graph on demand, vectors fully loaded
        }
        else if(index_mode == 2){ //SPANN
            //L2 layer
            string l2_graph_path = string(user_config["index_root_path"])+string(dataset_config["l2_graph_root_path"]);
            L2_graph.init(user_config, dataset_config, l2_graph_path, "", 1); //fully loaded

            //L3 layer
            l3_graph_load_in_batch = user_config["l3_graph_load_in_batch"];
            last_layer_vectors.init(user_config, dataset_config);
        }
        else if(index_mode == 3){ //AiSAQ
            //L2 layer
            string l2_graph_path = string(user_config["index_root_path"])+string(dataset_config["l2_graph_root_path"]);
            L2_graph.init(user_config, dataset_config, l2_graph_path, "", 0); //loaded on demand
        }
    }

    //ANNLite mode (index_mode 0): L1 in-memory search -> map to L2 ids -> L2 disk search -> L3 retrieval.
    int search_single_annlite(int q_id, QuerySet<T>& queryset, int thread_id){
        T* q_vector = queryset.querys.get_line(q_id);

        //L1 layer*****************************************************
        auto l1_st = queryset.perf.get_time();
        MinList min_list_nprobe(queryset.last_layer_nprobe);
        L1_graph.search_topk(q_vector, min_list_nprobe, queryset.perf, q_id, 0, thread_id); //scan the L1 graph to obtain entry points
        //compute the L1 recall
        if(calculate_recall_of_layers==1){
            float l1_recall = queryset.compute_recall_by_outer_gts(q_id, l1_gts, min_list_nprobe);
            queryset.perf.set_recall_rate(q_id, 0, l1_recall);
        }
        L1_L2_mapping.minlist_mapping(min_list_nprobe); //map the L1 entry points to L2 cluster-center ids
        auto l1_et = queryset.perf.get_time();
        queryset.perf.add_time(q_id, 1, queryset.perf.elapsed_time(l1_st, l1_et));
        #ifdef FAT_MINLIST_ENTRY
        //snapshot the current (L1) result ids before the node info is cleared; used for the overlap
        //statistics below (fixed: this declaration was commented out, breaking the FAT_MINLIST_ENTRY build)
        int* l1_result_snapshot = new int[min_list_nprobe.get_size()];
        min_list_nprobe.write_id_to_list(l1_result_snapshot);
        //clear the node info
        min_list_nprobe.reset_node();
        #endif

        //L2 layer: retrieve the nearest cluster centers**********************************
        auto l2_st = queryset.perf.get_time();
        if(l2_beamwidth_start==0){
            L2_graph.search_topk(q_vector, min_list_nprobe, queryset.perf, q_id, 1, thread_id, l2_io_limit, l2_update_ratio, pre_filter);
        }
        else if(pipesearch_on == 0){
            L2_graph.search_topk_beam(q_vector, min_list_nprobe, queryset.perf, q_id, 1, l2_beamwidth_start, beamwidth_increase_factor, thread_id, l2_io_limit, l2_update_ratio, pre_filter);
        }
        else if(pipesearch_on == 1){
            L2_graph.search_topk_pipe(q_vector, min_list_nprobe, queryset.perf, q_id, 1, l2_beamwidth_start, beamwidth_increase_factor, thread_id, l2_io_limit, l2_update_ratio, pre_filter);
        }
        auto l2_et = queryset.perf.get_time();
        queryset.perf.add_time(q_id, 4, queryset.perf.elapsed_time(l2_st, l2_et));
        if(calculate_recall_of_layers==1){
            float l2_recall = queryset.compute_recall_by_outer_gts(q_id, l2_gts, min_list_nprobe);
            queryset.perf.set_recall_rate(q_id, 1, l2_recall);
        }
        //the block below only collects diagnostic statistics and may be compiled out
        #ifdef FAT_MINLIST_ENTRY
        int* l2_result_snapshot = new int[min_list_nprobe.get_size()];
        min_list_nprobe.write_id_to_list(l2_result_snapshot);
        //overlap between the L1 and L2 result id sets
        int l1_l2_overlap_count = 0;
        for(int i=0; i<min_list_nprobe.get_size(); i++){
            for(int j=0; j<min_list_nprobe.get_size(); j++){
                if(l1_result_snapshot[i] == l2_result_snapshot[j]){
                    l1_l2_overlap_count++;
                    break;
                }
            }
        }
        float l1_l2_overlap_ratio = l1_l2_overlap_count * 1.0 / min_list_nprobe.get_size();
        queryset.perf.set_overlap_ratio(q_id, 0, l1_l2_overlap_ratio);
        //value of the L2 IOs: how many of the L1 result nodes were actually worth reading
        int* l2_result_parent_snapshot = new int[min_list_nprobe.get_size()];
        min_list_nprobe.write_parent_id_to_list(l2_result_parent_snapshot);
        //fraction of l2_result_parent_snapshot entries that come from l1_result_snapshot
        int l2_result_parent_overlap_count = 0;
        for(int i=0; i<min_list_nprobe.get_size(); i++){
            for(int j=0; j<min_list_nprobe.get_size(); j++){
                if(l1_result_snapshot[j] == l2_result_parent_snapshot[i]){
                    l2_result_parent_overlap_count++;
                    break;
                }
            }
        }
        float l2_result_parent_overlap_ratio = l2_result_parent_overlap_count * 1.0 / min_list_nprobe.get_size();
        l2_result_parent_overlap_ratio += 1; //only for avoid warning
        //highest index in l1_result_snapshot that still contributed to the final results
        int max_index = min_list_nprobe.get_max_root_node_id_index_in_last_layer(l1_result_snapshot);
        queryset.perf.set_trivial_value(q_id, 0, (float)max_index/min_list_nprobe.get_size());
        delete[] l1_result_snapshot; //release the diagnostic snapshots (fixed: these were leaked per query)
        delete[] l2_result_snapshot;
        delete[] l2_result_parent_snapshot;
        #endif

        //L3 layer: fetch vectors from the bottom layer by cluster-center id****************************
        MinList min_list_final(queryset.real_k); //holds the final results
        auto l3_st = queryset.perf.get_time();
        //synchronous IO
        if(!l3_graph_load_in_batch)
            last_layer_vectors.retrival_topk(q_vector, min_list_nprobe, min_list_final, queryset.perf, q_id, 2, thread_id); //read the vectors of each selected cluster and return the topk
        //asynchronous IO (io_uring or libaio)
        if(l3_graph_load_in_batch)
            last_layer_vectors.retrival_topk_batch(q_vector, min_list_nprobe, min_list_final, queryset.perf, q_id, 2, thread_id);
        auto l3_et = queryset.perf.get_time();
        queryset.perf.add_time(q_id, 7, queryset.perf.elapsed_time(l3_st, l3_et));

        //write the results into the queryset
        queryset.results.set_line_minlist(q_id, min_list_final);

        return 0;
    }

    //DiskANN mode (index_mode 1): single disk-graph search straight to the final results.
    int search_single_diskann(int q_id, QuerySet<T>& queryset, int thread_id){
        T* q_vector = queryset.querys.get_line(q_id);

        //L2 layer: search the disk graph**********************************
        MinList min_list_final(queryset.real_k); //holds the final results
        auto l2_st = queryset.perf.get_time();
        if(pipesearch_on == 0)
            L2_graph.search_topk_beam(q_vector, min_list_final, queryset.perf, q_id, 1, l2_beamwidth_start, beamwidth_increase_factor, thread_id, l2_io_limit, l2_update_ratio);
        else
            L2_graph.search_topk_pipe(q_vector, min_list_final, queryset.perf, q_id, 1, l2_beamwidth_start, beamwidth_increase_factor, thread_id, l2_io_limit, l2_update_ratio);
        auto l2_et = queryset.perf.get_time();
        queryset.perf.add_time(q_id, 4, queryset.perf.elapsed_time(l2_st, l2_et));

        if(calculate_recall_of_layers==1){
            float l2_recall = queryset.compute_recall_by_outer_gts(q_id, l2_gts, min_list_final);
            queryset.perf.set_recall_rate(q_id, 1, l2_recall);
        }

        //write the results into the queryset
        queryset.results.set_line_minlist(q_id, min_list_final);

        return 0;
    }

    //SPANN mode (index_mode 2): in-memory centroid search followed by L3 retrieval.
    int search_single_spann(int q_id, QuerySet<T>& queryset, int thread_id){
        T* q_vector = queryset.querys.get_line(q_id);

        //L2 layer: find the nearest cluster centers**********************************
        MinList min_list_nprobe(queryset.last_layer_nprobe);
        auto l2_st = queryset.perf.get_time();
        L2_graph.search_topk(q_vector, min_list_nprobe, queryset.perf, q_id, 0, thread_id);
        auto l2_et = queryset.perf.get_time();
        queryset.perf.add_time(q_id, 1, queryset.perf.elapsed_time(l2_st, l2_et));

        if(calculate_recall_of_layers==1){
            float l2_recall = queryset.compute_recall_by_outer_gts(q_id, l2_gts, min_list_nprobe);
            queryset.perf.set_recall_rate(q_id, 1, l2_recall);
        }

        //L3 layer: fetch vectors from the bottom layer by cluster-center id****************************
        MinList min_list_final(queryset.real_k); //holds the final results
        auto l3_st = queryset.perf.get_time();
        //synchronous IO
        if(!l3_graph_load_in_batch)
            last_layer_vectors.retrival_topk(q_vector, min_list_nprobe, min_list_final, queryset.perf, q_id, 2, thread_id);
        //asynchronous IO (io_uring or libaio)
        if(l3_graph_load_in_batch)
            last_layer_vectors.retrival_topk_batch(q_vector, min_list_nprobe, min_list_final, queryset.perf, q_id, 2, thread_id);
        auto l3_et = queryset.perf.get_time();
        queryset.perf.add_time(q_id, 7, queryset.perf.elapsed_time(l3_st, l3_et));

        //write the results into the queryset
        queryset.results.set_line_minlist(q_id, min_list_final);

        return 0;
    }

    //AiSAQ mode (index_mode 3): search path is identical to DiskANN.
    int search_single_aisaq(int q_id, QuerySet<T>& queryset, int thread_id){
        return search_single_diskann(q_id, queryset, thread_id);
    }

    //Searches one query and writes the result into the corresponding row of queryset.
    //Thread-safe without locking as long as q_ids are distinct: each query writes only
    //its own row and no global statistics are modified here.
    int search_single(int q_id, QuerySet<T>& queryset, int thread_id){
        auto st = queryset.perf.get_time();

        if(index_mode == 0){ //ANNLite
            search_single_annlite(q_id, queryset, thread_id);
        }
        else if(index_mode == 1){ //DiskANN
            search_single_diskann(q_id, queryset, thread_id);
        }
        else if(index_mode == 2){ //SPANN
            search_single_spann(q_id, queryset, thread_id);
        }
        else if(index_mode == 3){ //AiSAQ
            search_single_aisaq(q_id, queryset, thread_id);
        }
        else{
            cout << "Error: index_mode is not supported" << endl;
            exit(0);
        }

        //record the total time
        auto et = queryset.perf.get_time();
        queryset.perf.add_time(q_id, 0, queryset.perf.elapsed_time(st, et));

        return 0;
    }

    //Runs the whole query set: splits the queries into contiguous chunks, one per thread,
    //with the last thread absorbing the remainder.
    int search(QuerySet<T>& queryset) {
        cout << "Start searching, index mode: "<< index_mode<< endl;
        queryset.perf.mark_start();

        std::vector<std::thread> threads;
        int queries_per_thread = queryset.query_count / thread_count;

        for (int i = 0; i < thread_count; ++i) {
            int start = i * queries_per_thread;
            int end = (i == thread_count - 1) ? queryset.query_count : start + queries_per_thread;

            threads.emplace_back([this, start, end, i, &queryset]() {
                for (int q_id = start; q_id < end; ++q_id) {
                    search_single(q_id, queryset, i);
                }
            });
        }

        for (auto& t : threads) {
            t.join();
        }

        queryset.perf.mark_end();
        return 0;
    }

    ~ANNLite(){
        delete pre_filter; //nullptr-safe; fixes the leak of the bloom filter allocated in ANNLite mode
    }
};