#include <mpi.h>
#include <omp.h>
#include <arm_neon.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <memory>
#include <mutex>
#include <new>
#include <queue>
#include <random>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Global declaration of the custom MPI datatype handle for
// std::pair<float, size_t>-shaped messages.
extern MPI_Datatype PAIR_FLOAT_SIZE_T;

// File-scope definition: starts as the null handle and is lazily built by
// create_pair_float_size_t() on first use (see mpi_ivf_pq_search).
MPI_Datatype PAIR_FLOAT_SIZE_T = MPI_DATATYPE_NULL;

// 内存对齐分配器
// Minimal C++17 allocator that returns 16-byte-aligned storage so the
// buffers can be fed to the NEON kernels below.
template<typename T>
struct AlignedAllocator {
    using value_type = T;
    static constexpr size_t alignment = 16; // alignment target for NEON-friendly buffers

    AlignedAllocator() = default;
    template<class U> AlignedAllocator(const AlignedAllocator<U>&) {}

    // Allocates storage for n objects of T.
    // BUGFIX: C11/C++17 aligned_alloc requires the byte size to be a
    // multiple of the alignment; the original passed n * sizeof(T)
    // directly, so e.g. 12 bytes makes glibc return nullptr and a
    // spurious bad_alloc is thrown. Round the request up.
    T* allocate(size_t n) {
        if (n > std::numeric_limits<size_t>::max() / sizeof(T))
            throw std::bad_alloc();
        size_t bytes = n * sizeof(T);
        bytes = (bytes + alignment - 1) / alignment * alignment; // round up to multiple of 16
        void* ptr = aligned_alloc(alignment, bytes);

        if (!ptr) throw std::bad_alloc();
        return static_cast<T*>(ptr);
    }

    // aligned_alloc memory is released with plain free().
    void deallocate(T* p, size_t) { free(p); }
};

// The allocator is stateless, so any two instances — regardless of their
// element types — compare equal (and therefore never unequal).
template<typename A, typename B>
bool operator==(const AlignedAllocator<A>&, const AlignedAllocator<B>&) {
    return true;
}

template<typename A, typename B>
bool operator!=(const AlignedAllocator<A>&, const AlignedAllocator<B>&) {
    return false;
}

// Convenience alias: a std::vector whose backing storage is 16-byte
// aligned via AlignedAllocator (the alignment the NEON helpers assert).
template<typename T>
using AlignedVector = std::vector<T, AlignedAllocator<T>>;

// In-memory IVF-PQ index: a coarse quantizer (nlist centroids with
// inverted lists) plus per-subspace product-quantization codebooks/codes.
struct IVF_PQ_Index {
    std::vector<AlignedVector<float>> centroids;  // IVF coarse-cluster centroids, one vecdim-float row per cluster
    std::vector<std::vector<uint32_t>> lists;     // inverted lists: per cluster, the ids of its member vectors
    size_t nlist = 1024;                          // number of coarse clusters
    size_t sub_dim;                              // PQ subspace dimension (vecdim / m)
    size_t m = 8;                                // number of PQ subspaces
    size_t ks = 256;                             // codewords per subspace (fits a uint8_t code)
    std::vector<AlignedVector<float>> pq_codebooks; // per-subspace codebook, flattened as ks * sub_dim floats
    std::vector<std::vector<uint8_t>> pq_codes;    // per base vector: m one-byte codeword ids
    std::vector<AlignedVector<float>> centroid_norms; // L2 norm of each coarse centroid (1 float each)
    std::vector<AlignedVector<float>> pq_norms;    // L2 norm of each PQ codeword, per subspace
    bool initialized = false;                      // set once build/broadcast has completed
};

// Process-wide singleton index and the mutex that serializes its construction.
static IVF_PQ_Index ivf_pq_index;
static std::mutex init_mutex;

// Builds (and commits) an MPI struct type that mirrors the in-memory
// layout of { float; size_t } so arrays of such pairs can be sent directly.
MPI_Datatype create_pair_float_size_t() {
    MPI_Datatype mpi_pair_type;
    int blocklengths[2] = {1, 1};
    // BUGFIX: the second member is a size_t (8 bytes on LP64 platforms),
    // but the original described it as MPI_UINT32_T, which mismatches the
    // C layout and silently corrupts every transferred pair. Pick the MPI
    // integer type that matches sizeof(size_t) at compile time.
    MPI_Datatype types[2] = {
        MPI_FLOAT,
        sizeof(size_t) == 8 ? MPI_UINT64_T : MPI_UINT32_T
    };
    MPI_Aint offsets[2];

    // Compute member offsets portably via MPI_Get_address.
    struct Pair { float first; size_t second; } dummy;
    MPI_Aint base;
    MPI_Get_address(&dummy, &base);
    MPI_Get_address(&dummy.first, &offsets[0]);
    MPI_Get_address(&dummy.second, &offsets[1]);
    offsets[0] -= base;
    offsets[1] -= base;

    MPI_Datatype tmp_type;
    MPI_Type_create_struct(2, blocklengths, offsets, types, &tmp_type);
    // Resize the extent to sizeof(Pair) so that consecutive array elements
    // (which include the struct's padding) are addressed correctly.
    MPI_Type_create_resized(tmp_type, 0, sizeof(Pair), &mpi_pair_type);
    MPI_Type_free(&tmp_type);
    MPI_Type_commit(&mpi_pair_type);
    return mpi_pair_type;
}

// NEON-accelerated dot product of two float arrays of length `size`.
// BUGFIX: the original asserted that both pointers were 16-byte aligned,
// but vld1q_f32 has no alignment requirement on AArch64, and callers pass
// arbitrary row offsets (e.g. &data[i * vecdim]), so debug builds aborted
// on perfectly valid unaligned input. The asserts are removed.
float compute_inner_product_neon(const float* a, const float* b, size_t size) {
    float32x4_t sum = vdupq_n_f32(0.0f);
    size_t i = 0;
    // Main loop: 4 lanes at a time with fused multiply-accumulate.
    for (; i + 4 <= size; i += 4) {
        float32x4_t va = vld1q_f32(a + i);
        float32x4_t vb = vld1q_f32(b + i);
        sum = vmlaq_f32(sum, va, vb);
    }
    float result = vaddvq_f32(sum); // horizontal add of the 4 partial sums
    // Scalar tail for the size % 4 leftover elements.
    for (; i < size; ++i) {
        result += a[i] * b[i];
    }
    return result;
}


// k-means++-style seeding under an inner-product similarity.
//   data      : flat row-major vectors; data_size is the TOTAL float count,
//               so there are data_size / vecdim points.
//   centroids : output, resized to nlist rows of vecdim floats.
void kmeans_plusplus_init(const float* data, 
                        size_t data_size, 
                        size_t vecdim, 
                        size_t nlist,
                        std::vector<AlignedVector<float>>& centroids) {
    std::mt19937 rng(std::random_device{}());
    const size_t n_points = data_size / vecdim;
    centroids.clear();
    centroids.resize(nlist);
    for (auto& c : centroids) c.resize(vecdim);

    // Pick the first center uniformly at random.
    size_t first_idx = rng() % n_points;
    std::copy_n(data + first_idx * vecdim, vecdim, centroids[0].data());

    // Pick each subsequent center with probability proportional to its best
    // similarity to the already-chosen centers.
    for (size_t k = 1; k < nlist; ++k) {
        std::vector<float> weights(n_points, 0.0f);

        // BUGFIX: the original also accumulated a shared float total inside
        // this parallel loop without a reduction — a data race. The total
        // was never used, so it is simply dropped.
        #pragma omp parallel for
        for (size_t i = 0; i < n_points; ++i) {
            float max_sim = -std::numeric_limits<float>::max();
            for (size_t j = 0; j < k; ++j) {
                float sim = compute_inner_product_neon(&data[i * vecdim], centroids[j].data(), vecdim);
                if (sim > max_sim) max_sim = sim;
            }
            weights[i] = max_sim;
        }

        // BUGFIX: std::discrete_distribution requires non-negative weights,
        // but inner products can be negative — shift so the minimum is 0,
        // and fall back to uniform if every weight ends up zero.
        float min_w = *std::min_element(weights.begin(), weights.end());
        if (min_w < 0.0f) {
            for (float& w : weights) w -= min_w;
        }
        if (*std::max_element(weights.begin(), weights.end()) <= 0.0f) {
            std::fill(weights.begin(), weights.end(), 1.0f);
        }

        std::discrete_distribution<size_t> dist_prob(weights.begin(), weights.end());
        size_t new_idx = dist_prob(rng);
        std::copy_n(data + new_idx * vecdim, vecdim, centroids[k].data());
    }
}

// Lloyd-style k-means under an inner-product similarity.
//   data        : flat row-major vectors; data_size is the TOTAL float
//                 count (data_size / vecdim points).
//   assignments : output, cluster id per point.
void kmeans(const float* data, size_t data_size, size_t nlist, size_t vecdim, size_t max_iter, 
           std::vector<AlignedVector<float>>& centroids, std::vector<size_t>& assignments) {
    const size_t n_points = data_size / vecdim;
    assignments.resize(n_points);
    std::vector<std::vector<size_t>> cluster_points(nlist);

    // BUGFIX: the original called kmeans_plusplus_init(data, data_size,
    // nlist, vecdim, ...) but the signature is (data, data_size, vecdim,
    // nlist, ...) — the vecdim/nlist arguments were swapped.
    kmeans_plusplus_init(data, data_size, vecdim, nlist, centroids);

    for (size_t iter = 0; iter < max_iter; ++iter) {
        // Assign every point to the centroid with the largest inner product.
        // BUGFIX: the original pushed into shared cluster_points inside this
        // parallel loop — a data race on the per-cluster vectors. Only the
        // (disjointly indexed) assignments are written here; membership is
        // rebuilt serially below.
        #pragma omp parallel for
        for (size_t i = 0; i < n_points; ++i) {
            float max_ip = -std::numeric_limits<float>::max();
            size_t best_k = 0;
            for (size_t k = 0; k < nlist; ++k) {
                float ip = compute_inner_product_neon(&data[i * vecdim], centroids[k].data(), vecdim);
                if (ip > max_ip) {
                    max_ip = ip;
                    best_k = k;
                }
            }
            assignments[i] = best_k;
        }

        cluster_points.clear();
        cluster_points.resize(nlist);
        for (size_t i = 0; i < n_points; ++i) {
            cluster_points[assignments[i]].push_back(i);
        }

        // Recompute each centroid as the mean of its members; empty
        // clusters keep their previous centroid.
        bool converged = true;
        for (size_t k = 0; k < nlist; ++k) {
            if (cluster_points[k].empty()) continue;

            AlignedVector<float> new_centroid(vecdim, 0.0f);
            for (size_t idx : cluster_points[k]) {
                for (size_t i = 0; i < vecdim; ++i) {
                    new_centroid[i] += data[idx * vecdim + i];
                }
            }
            for (size_t i = 0; i < vecdim; ++i) {
                new_centroid[i] /= cluster_points[k].size();
                // Converged only when no coordinate moved more than 1e-5.
                if (std::abs(new_centroid[i] - centroids[k][i]) > 1e-5f) {
                    converged = false;
                }
                centroids[k][i] = new_centroid[i];
            }
        }

        if (converged) break;
    }
}

// 训练 PQ 码本
// 修正后的PQ训练函数
void train_pq(const float* data, const std::vector<std::vector<size_t>>& cluster_assignments, 
             size_t m, size_t ks, size_t vecdim, std::vector<AlignedVector<float>>& pq_codebooks) {
    size_t sub_dim = vecdim / m;
    pq_codebooks.resize(m);
    
    #pragma omp parallel for
    for (size_t i = 0; i < m; ++i) {
        // 收集所有子向量
        std::vector<AlignedVector<float>> sub_vectors;
        for (const auto& cluster : cluster_assignments) {
            for (size_t idx : cluster) {
                AlignedVector<float> sub_vec(sub_dim);
                std::copy_n(data + idx * vecdim + i * sub_dim, sub_dim, sub_vec.data());
                sub_vectors.push_back(sub_vec);
            }
        }
        
        // 训练子码本
        std::vector<AlignedVector<float>> codebook(ks);
        std::vector<size_t> assignments(sub_vectors.size());
        kmeans(sub_vectors[0].data(), sub_vectors[0].size(), ks, sub_dim, 100, codebook, assignments);
        
        // 存储码本
        pq_codebooks[i].resize(ks * sub_dim);
        for (size_t j = 0; j < ks; ++j) {
            std::copy_n(codebook[j].data(), sub_dim, &pq_codebooks[i][j * sub_dim]);
        }
    }
}

// PQ-encodes one vector: for each of the m subspaces, stores the index of
// the codeword with the largest inner product against the sub-vector.
void pq_encode(const float* vec, size_t m, size_t sub_dim, 
              const std::vector<AlignedVector<float>>& pq_codebooks, 
              std::vector<uint8_t>& code) {
    code.resize(m);
    for (size_t sub = 0; sub < m; ++sub) {
        const float* sub_vec = vec + sub * sub_dim;
        const auto& book = pq_codebooks[sub];
        const size_t n_codes = book.size() / sub_dim;

        size_t best = 0;
        float best_ip = -std::numeric_limits<float>::max();
        for (size_t c = 0; c < n_codes; ++c) {
            const float ip = compute_inner_product_neon(sub_vec, &book[c * sub_dim], sub_dim);
            if (ip > best_ip) {
                best_ip = ip;
                best = c;
            }
        }
        // ks <= 256, so the winning index always fits a byte.
        code[sub] = static_cast<uint8_t>(best);
    }
}
// Builds the process-wide IVF-PQ index from row-major base vectors.
//   data      : data_size * vecdim floats
//   data_size : number of base vectors
//   nlist     : requested coarse-cluster count (capped below)
//   vecdim    : vector dimension; must be divisible by m (= 8)
//   max_iter  : k-means iteration cap
// Thread-safe and idempotent: guarded by init_mutex / `initialized`.
void init_ivf_pq(const float* data, 
                size_t data_size,
                size_t nlist,
                size_t vecdim,
                size_t max_iter) {
    std::lock_guard<std::mutex> lock(init_mutex);

    if (!data || data_size == 0 || vecdim == 0) {
        std::cerr << "Invalid input data for IVF-PQ initialization" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    if (ivf_pq_index.initialized) return;

    // Roughly one coarse centroid per 100 points, never below 1.
    // BUGFIX: honor the caller's nlist (the original ignored the parameter
    // and hard-coded 1024 as the cap — identical for the current caller,
    // but the parameter was dead) and guard against nlist == 0 when
    // data_size < 100.
    ivf_pq_index.nlist = std::max<size_t>(1, std::min<size_t>(nlist, data_size / 100));
    ivf_pq_index.m = 8;    // fixed number of PQ subspaces
    ivf_pq_index.ks = 256; // codewords per subspace (fits uint8_t codes)
    
    if (vecdim % ivf_pq_index.m != 0) {
        throw std::runtime_error("Vector dimension " + std::to_string(vecdim) + 
                               " must be divisible by m (" + std::to_string(ivf_pq_index.m) + ")");
    }
    ivf_pq_index.sub_dim = vecdim / ivf_pq_index.m;

    // Aligned copy of the data for the NEON kernels.
    AlignedVector<float> aligned_data(data, data + data_size * vecdim);

    // 1. Train the coarse quantizer.
    // BUGFIX: the original passed the raw caller nlist to kmeans while
    // sizing lists/norms with the adjusted ivf_pq_index.nlist, so the
    // assignments could index out of range; use the adjusted value.
    // BUGFIX: kmeans() treats its size argument as a TOTAL float count (it
    // divides by vecdim internally), so pass data_size * vecdim, not the
    // vector count.
    std::vector<size_t> cluster_assignments;
    kmeans(aligned_data.data(), data_size * vecdim, ivf_pq_index.nlist, vecdim, max_iter,
           ivf_pq_index.centroids, cluster_assignments);

    // 2. Populate the inverted lists.
    ivf_pq_index.lists.assign(ivf_pq_index.nlist, {});
    for (size_t i = 0; i < data_size; ++i) {
        ivf_pq_index.lists[cluster_assignments[i]].push_back(static_cast<uint32_t>(i));
    }

    // 3. L2 norm of every coarse centroid.
    ivf_pq_index.centroid_norms.resize(ivf_pq_index.nlist);
    for (auto& norm : ivf_pq_index.centroid_norms) norm.resize(1);
    #pragma omp parallel for
    for (size_t i = 0; i < ivf_pq_index.nlist; ++i) {
        float norm = 0.0f;
        for (size_t j = 0; j < vecdim; ++j) {
            norm += ivf_pq_index.centroids[i][j] * ivf_pq_index.centroids[i][j];
        }
        ivf_pq_index.centroid_norms[i][0] = std::sqrt(norm);
    }

    // 4. Train the PQ codebooks ONCE over all points.
    // BUGFIX: the original wrapped this in `for (cluster_id < nlist)` and
    // re-ran the identical training nlist times; one pass suffices because
    // train_pq already walks every cluster's members.
    std::vector<std::vector<size_t>> members(ivf_pq_index.nlist);
    for (size_t i = 0; i < cluster_assignments.size(); ++i) {
        members[cluster_assignments[i]].push_back(i);
    }
    train_pq(aligned_data.data(), members, ivf_pq_index.m, ivf_pq_index.ks, vecdim,
             ivf_pq_index.pq_codebooks);

    // 5. L2 norm of every PQ codeword.
    ivf_pq_index.pq_norms.resize(ivf_pq_index.m);
    for (size_t i = 0; i < ivf_pq_index.m; ++i) {
        ivf_pq_index.pq_norms[i].resize(ivf_pq_index.ks);
        for (size_t j = 0; j < ivf_pq_index.ks; ++j) {
            float norm = 0.0f;
            for (size_t k = 0; k < ivf_pq_index.sub_dim; ++k) {
                float c = ivf_pq_index.pq_codebooks[i][j * ivf_pq_index.sub_dim + k];
                norm += c * c;
            }
            ivf_pq_index.pq_norms[i][j] = std::sqrt(norm);
        }
    }

    // 6. PQ-encode every base vector.
    ivf_pq_index.pq_codes.resize(data_size);
    for (size_t i = 0; i < data_size; ++i) {
        pq_encode(&aligned_data[i * vecdim], ivf_pq_index.m, ivf_pq_index.sub_dim,
                  ivf_pq_index.pq_codebooks, ivf_pq_index.pq_codes[i]);
    }

    ivf_pq_index.initialized = true;
}

// Replicates the IVF-PQ index core (sizes, coarse centroids, PQ codebooks)
// from `root` to every rank. Inverted lists and PQ codes are handled by
// the dedicated broadcast functions below.
void broadcast_ivf_pq_index(int root) {
    int my_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    const bool is_root = (my_rank == root);

    // Ship the four size parameters in a single broadcast.
    size_t params[4];
    if (is_root) {
        params[0] = ivf_pq_index.nlist;
        params[1] = ivf_pq_index.sub_dim;
        params[2] = ivf_pq_index.m;
        params[3] = ivf_pq_index.ks;
    }
    MPI_Bcast(params, 4, MPI_UNSIGNED_LONG, root, MPI_COMM_WORLD);
    if (!is_root) {
        ivf_pq_index.nlist = params[0];
        ivf_pq_index.sub_dim = params[1];
        ivf_pq_index.m = params[2];
        ivf_pq_index.ks = params[3];
    }

    // Coarse centroids: receivers allocate nlist rows of sub_dim * m
    // (= full vector dimension) floats, then every row is broadcast.
    if (!is_root) {
        const size_t full_dim = ivf_pq_index.sub_dim * ivf_pq_index.m;
        ivf_pq_index.centroids.resize(ivf_pq_index.nlist);
        for (auto& row : ivf_pq_index.centroids) row.resize(full_dim);
    }
    for (auto& row : ivf_pq_index.centroids) {
        MPI_Bcast(row.data(), row.size(), MPI_FLOAT, root, MPI_COMM_WORLD);
    }

    // PQ codebooks: one flat buffer of ks * sub_dim floats per subspace.
    if (!is_root) {
        const size_t book_len = ivf_pq_index.ks * ivf_pq_index.sub_dim;
        ivf_pq_index.pq_codebooks.resize(ivf_pq_index.m);
        for (auto& book : ivf_pq_index.pq_codebooks) book.resize(book_len);
    }
    for (auto& book : ivf_pq_index.pq_codebooks) {
        MPI_Bcast(book.data(), book.size(), MPI_FLOAT, root, MPI_COMM_WORLD);
    }

    // NOTE: inverted lists and PQ codes can be large; a real deployment
    // would shard them rather than replicate everywhere.

    if (!is_root) {
        ivf_pq_index.initialized = true;
    }
}

// Replicates the inverted lists from `root` to every rank. Assumes the
// index metadata (nlist) has already been broadcast.
void broadcast_inverted_lists(int root) {
    int my_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    // First broadcast each cluster's list length...
    std::vector<size_t> lengths(ivf_pq_index.nlist);
    if (my_rank == root) {
        for (size_t i = 0; i < ivf_pq_index.nlist; ++i) {
            lengths[i] = ivf_pq_index.lists[i].size();
        }
    }
    MPI_Bcast(lengths.data(), ivf_pq_index.nlist, MPI_UNSIGNED_LONG, root, MPI_COMM_WORLD);

    // ...so receivers can size their storage before the payload arrives.
    if (my_rank != root) {
        ivf_pq_index.lists.resize(ivf_pq_index.nlist);
        for (size_t i = 0; i < ivf_pq_index.nlist; ++i) {
            ivf_pq_index.lists[i].resize(lengths[i]);
        }
    }

    // Then one broadcast per inverted list.
    for (size_t i = 0; i < ivf_pq_index.nlist; ++i) {
        MPI_Bcast(ivf_pq_index.lists[i].data(), lengths[i], MPI_UINT32_T, root, MPI_COMM_WORLD);
    }
}

// Replicates the per-vector PQ codes from `root` to every rank.
// PERF FIX: the original issued two broadcasts PER VECTOR (one for the
// constant per-code length, one for the m bytes of payload) — O(n)
// collective latency. The length is constant (m), so it is broadcast once
// and the codes are shipped in a single flattened buffer.
void broadcast_pq_codes(int root, size_t data_size) {
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Number of encoded vectors.
    size_t pq_codes_size = data_size;
    MPI_Bcast(&pq_codes_size, 1, MPI_UNSIGNED_LONG, root, MPI_COMM_WORLD);

    // Every code is exactly m bytes; broadcast that once.
    size_t code_size = ivf_pq_index.m;
    MPI_Bcast(&code_size, 1, MPI_UNSIGNED_LONG, root, MPI_COMM_WORLD);

    if (rank != root) {
        ivf_pq_index.pq_codes.assign(pq_codes_size, std::vector<uint8_t>(code_size));
    }

    // Pack all codes into one contiguous buffer, broadcast it in a single
    // collective, and unpack on the receivers.
    // NOTE(review): flat.size() must fit an int (MPI count); assumes
    // data_size * m < 2^31 — split into chunks if that can be exceeded.
    std::vector<uint8_t> flat(pq_codes_size * code_size);
    if (rank == root) {
        for (size_t i = 0; i < pq_codes_size; ++i) {
            std::copy_n(ivf_pq_index.pq_codes[i].data(), code_size, flat.data() + i * code_size);
        }
    }
    MPI_Bcast(flat.data(), static_cast<int>(flat.size()), MPI_UNSIGNED_CHAR, root, MPI_COMM_WORLD);
    if (rank != root) {
        for (size_t i = 0; i < pq_codes_size; ++i) {
            std::copy_n(flat.data() + i * code_size, code_size, ivf_pq_index.pq_codes[i].data());
        }
    }
}

// 修改距离计算方式
float fast_pq_inner_product(const float* query, const std::vector<uint8_t>& code,
                           const IVF_PQ_Index& index) {
    float result = 0.0f;
    for (size_t m = 0; m < index.m; ++m) {
        size_t sub_idx = code[m];
        const float* centroid = &index.centroids[m][sub_idx * index.sub_dim];
        result += compute_inner_product_neon(query + m * index.sub_dim, 
                                          centroid, 
                                          index.sub_dim);
    }
    return result;
}

// 修改后的搜索函数 - MPI版本
std::vector<std::pair<float, uint32_t>> mpi_ivf_pq_search(
    const float* query, const float* base_data, size_t vecdim, size_t top_k, 
    size_t nprobe = 10, size_t rerank_k = 200) {

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // 确保 MPI 数据类型已初始化
    if (PAIR_FLOAT_SIZE_T == MPI_DATATYPE_NULL) {
        PAIR_FLOAT_SIZE_T = create_pair_float_size_t();
        MPI_Type_commit(&PAIR_FLOAT_SIZE_T);
    }

    // === 调试输出：验证输入数据 ===
    if (rank == 0 && ivf_pq_index.lists.empty()) {
        std::cerr << "Error: Inverted lists are empty!" << std::endl;
    }

    // 1. 对齐查询向量（确保内存对齐）
    AlignedVector<float> aligned_query(vecdim);
    std::copy_n(query, vecdim, aligned_query.data());
    MPI_Bcast(aligned_query.data(), vecdim, MPI_FLOAT, 0, MPI_COMM_WORLD);

    // 2. 分布式计算聚类中心评分（使用内积）
    size_t start = (ivf_pq_index.nlist / size) * rank;
    size_t end = (rank == size - 1) ? ivf_pq_index.nlist : start + (ivf_pq_index.nlist / size);
    std::vector<std::pair<float, size_t>> local_centroid_scores;
    
    for (size_t i = start; i < end; ++i) {
        if (ivf_pq_index.lists[i].empty()) continue;
        float ip = compute_inner_product_neon(aligned_query.data(), ivf_pq_index.centroids[i].data(), vecdim);
        local_centroid_scores.emplace_back(ip, i);
    }


    // 3. 收集所有聚类中心评分到主进程
    std::vector<std::pair<float, size_t>> global_centroid_scores;
    if (rank == 0) {
        global_centroid_scores.resize(ivf_pq_index.nlist);
    }

    // 收集各进程的结果数量
    int local_size = static_cast<int>(local_centroid_scores.size());
    std::vector<int> recv_counts(size);
    MPI_Gather(&local_size, 1, MPI_INT, recv_counts.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);

    // 主进程收集结果
    if (rank == 0) {
        std::vector<int> displs(size, 0);
        for (int i = 1; i < size; ++i) {
            displs[i] = displs[i-1] + recv_counts[i-1];
        }
        MPI_Gatherv(
            local_centroid_scores.data(), local_size, PAIR_FLOAT_SIZE_T,
            global_centroid_scores.data(), recv_counts.data(), displs.data(),
            PAIR_FLOAT_SIZE_T, 0, MPI_COMM_WORLD
        );
    } else {
        MPI_Gatherv(
            local_centroid_scores.data(), local_size, PAIR_FLOAT_SIZE_T,
            nullptr, nullptr, nullptr, PAIR_FLOAT_SIZE_T, 0, MPI_COMM_WORLD
        );
    }

    // 4. 主进程排序并广播 top-nprobe 聚类
    std::vector<size_t> top_clusters;
    if (rank == 0) {
        std::sort(
            global_centroid_scores.begin(), global_centroid_scores.end(),
            std::greater<std::pair<float, size_t>>()
        );
        for (size_t i = 0; i < std::min(nprobe, global_centroid_scores.size()); ++i) {
            top_clusters.push_back(global_centroid_scores[i].second);
        }
    }

    // 广播聚类数量和数据
    size_t cluster_count = top_clusters.size();
    MPI_Bcast(&cluster_count, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
    if (rank != 0) top_clusters.resize(cluster_count);
    MPI_Bcast(top_clusters.data(), cluster_count, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);

    // 5. 分布式处理候选向量（按聚类分配）
    std::vector<uint32_t> local_candidates;
    for (size_t cluster_id : top_clusters) {
        const auto& list = ivf_pq_index.lists[cluster_id];
        size_t chunk_size = list.size() / size;
        size_t start_idx = rank * chunk_size;
        size_t end_idx = (rank == size - 1) ? list.size() : start_idx + chunk_size;
        local_candidates.insert(
            local_candidates.end(),
            list.begin() + start_idx,
            list.begin() + end_idx
        );
    }

    // 6. 本地精确计算内积（跳过 PQ 近似）
    std::vector<std::pair<float, uint32_t>> local_results;
    for (uint32_t idx : local_candidates) {
        float ip = compute_inner_product_neon(
            aligned_query.data(),
            base_data + idx * vecdim,
            vecdim
        );
        local_results.emplace_back(ip, idx);
    }

    // 7. 本地排序并截断
    std::sort(
        local_results.begin(), local_results.end(),
        std::greater<std::pair<float, uint32_t>>()
    );
    if (local_results.size() > rerank_k / size) {
        local_results.resize(rerank_k / size);
    }

    // 8. 收集全局结果到主进程
    std::vector<std::pair<float, uint32_t>> global_results;
    if (rank == 0) {
        global_results = local_results;
        std::vector<std::pair<float, uint32_t>> temp_buf;
        for (int src = 1; src < size; ++src) {
            int recv_size;
            MPI_Recv(&recv_size, 1, MPI_INT, src, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            if (recv_size > 0) {
                temp_buf.resize(recv_size);
                MPI_Recv(
                    temp_buf.data(), recv_size, PAIR_FLOAT_SIZE_T,
                    src, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE
                );
                global_results.insert(global_results.end(), temp_buf.begin(), temp_buf.end());
            }
        }
    } else {
        int send_size = static_cast<int>(local_results.size());
        MPI_Send(&send_size, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        if (send_size > 0) {
            MPI_Send(
                local_results.data(), send_size, PAIR_FLOAT_SIZE_T,
                0, 0, MPI_COMM_WORLD
            );
        }
    }

    // 9. 主进程合并并返回最终结果
    if (rank == 0) {
        std::sort(
            global_results.begin(), global_results.end(),
            std::greater<std::pair<float, uint32_t>>()
        );
        if (global_results.size() > top_k) {
            global_results.resize(top_k);
        }
        return global_results;
    }
    return {};
}

// Builds the IVF-PQ index on rank 0, then replicates every piece of it
// (metadata, centroids, codebooks, inverted lists, PQ codes) to all ranks.
// argc/argv are accepted for signature compatibility but are not used here.
void initialize_mpi_ivf_pq(int argc, char** argv, const float* base, 
                         size_t base_number, size_t vecdim) {

    int my_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    if (my_rank == 0) {
        const size_t nlist = 1024;   // requested coarse-cluster count
        const size_t max_iter = 100; // k-means iteration cap
        init_ivf_pq(base, base_number, nlist, vecdim, max_iter);
    }

    broadcast_ivf_pq_index(0);          // sizes + centroids + codebooks
    broadcast_inverted_lists(0);        // per-cluster id lists
    broadcast_pq_codes(0, base_number); // per-vector PQ codes

    MPI_Barrier(MPI_COMM_WORLD);        // all ranks leave with a complete index
}